/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"

class HasAccumulatedModifiedOopsClosure : public CLDClosure {
  bool _found;
 public:
  HasAccumulatedModifiedOopsClosure() : _found(false) {}
  void do_cld(ClassLoaderData* cld) {
    if (_found) {
      return;
    }

    if (cld->has_accumulated_modified_oops()) {
      _found = true;
    }
  }
  bool found() {
    return _found;
  }
};

bool CLDRemSet::mod_union_is_clear() {
  HasAccumulatedModifiedOopsClosure closure;
  ClassLoaderDataGraph::cld_do(&closure);

  return !closure.found();
}


class ClearCLDModUnionClosure : public CLDClosure {
 public:
  void do_cld(ClassLoaderData* cld) {
    if (cld->has_accumulated_modified_oops()) {
      cld->clear_accumulated_modified_oops();
    }
  }
};

void CLDRemSet::clear_mod_union() {
  ClearCLDModUnionClosure closure;
  ClassLoaderDataGraph::cld_do(&closure);
}


jbyte CardTableRS::find_unused_youngergenP_card_value() {
  for (jbyte v = youngergenP1_card;
       v < cur_youngergen_and_prev_nonclean_card;
       v++) {
    bool seen = false;
    for (int g = 0; g < _regions_to_iterate; g++) {
      if (_last_cur_val_in_gen[g] == v) {
        seen = true;
        break;
      }
    }
    if (!seen) {
      return v;
    }
  }
  ShouldNotReachHere();
  return 0;
}

void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
  // Parallel or sequential, we must always set the prev to equal the
  // last one written.
  if (parallel) {
    // Find a parallel value to be used next.
    jbyte next_val = find_unused_youngergenP_card_value();
    set_cur_youngergen_card_val(next_val);

  } else {
    // In a sequential traversal we will always write youngergen, so that
    // the inline barrier is correct.
    set_cur_youngergen_card_val(youngergen_card);
  }
}

void CardTableRS::younger_refs_iterate(Generation* g,
                                       OopsInGenClosure* blk,
                                       uint n_threads) {
  // The indexing in this array is slightly odd. We want to access
  // the old generation record here, which is at index 2.
  _last_cur_val_in_gen[2] = cur_youngergen_card_val();
  g->younger_refs_iterate(blk, n_threads);
}

inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
  if (_is_par) {
    return clear_card_parallel(entry);
  } else {
    return clear_card_serial(entry);
  }
}

inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
  while (true) {
    // In the parallel case, we may have to do this several times.
    jbyte entry_val = *entry;
    assert(entry_val != CardTableRS::clean_card_val(),
           "We shouldn't be looking at clean cards, and this should "
           "be the only place they get cleaned.");
    if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
        || _ct->is_prev_youngergen_card_val(entry_val)) {
      jbyte res =
        Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
      if (res == entry_val) {
        break;
      } else {
        assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
               "The CAS above should only fail if another thread did "
               "a GC write barrier.");
      }
    } else if (entry_val ==
               CardTableRS::cur_youngergen_and_prev_nonclean_card) {
      // Parallelism shouldn't matter in this case. Only the thread
      // assigned to scan the card should change this value.
      *entry = _ct->cur_youngergen_card_val();
      break;
    } else {
      assert(entry_val == _ct->cur_youngergen_card_val(),
             "Should be the only possibility.");
      // In this case, the card was clean before, and became
      // cur_youngergen only because of processing of a promoted object.
      // We don't have to look at the card.
      return false;
    }
  }
  return true;
}


inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
  jbyte entry_val = *entry;
  assert(entry_val != CardTableRS::clean_card_val(),
         "We shouldn't be looking at clean cards, and this should "
         "be the only place they get cleaned.");
  assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
         "This should not be possible in the sequential case.");
  *entry = CardTableRS::clean_card_val();
  return true;
}

ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
  DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) :
    _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
}

bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
  return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
}

// The regions are visited in *decreasing* address order.
// This order aids with imprecise card marking, where a dirty
// card may cause scanning, and summarization marking, of objects
// that extend onto subsequent cards.
void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
  assert(mr.word_size() > 0, "Error");
  assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
  // mr.end() may not necessarily be card aligned.
  jbyte* cur_entry = _ct->byte_for(mr.last());
  const jbyte* limit = _ct->byte_for(mr.start());
  HeapWord* end_of_non_clean = mr.end();
  HeapWord* start_of_non_clean = end_of_non_clean;
  while (cur_entry >= limit) {
    HeapWord* cur_hw = _ct->addr_for(cur_entry);
    if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
      // Continue the dirty range by opening the
      // dirty window one card to the left.
      start_of_non_clean = cur_hw;
    } else {
      // We hit a "clean" card; process any non-empty
      // "dirty" range accumulated so far.
      if (start_of_non_clean < end_of_non_clean) {
        const MemRegion mrd(start_of_non_clean, end_of_non_clean);
        _dirty_card_closure->do_MemRegion(mrd);
      }

      // fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary
      if (is_word_aligned(cur_entry)) {
        jbyte* cur_row = cur_entry - BytesPerWord;
        while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row_val()) {
          cur_row -= BytesPerWord;
        }
        cur_entry = cur_row + BytesPerWord;
        cur_hw = _ct->addr_for(cur_entry);
      }

      // Reset the dirty window, while continuing to look
      // for the next dirty card that will start a
      // new dirty window.
      end_of_non_clean = cur_hw;
      start_of_non_clean = cur_hw;
    }
    // Note that "cur_entry" leads "start_of_non_clean" in
    // its leftward excursion after this point
    // in the loop and, when we hit the left end of "mr",
    // will point off of the left end of the card-table
    // for "mr".
    cur_entry--;
  }
  // If the first card of "mr" was dirty, we will have
  // been left with a dirty window, co-initial with "mr",
  // which we now process.
  if (start_of_non_clean < end_of_non_clean) {
    const MemRegion mrd(start_of_non_clean, end_of_non_clean);
    _dirty_card_closure->do_MemRegion(mrd);
  }
}

// clean (by dirty->clean before)        ==> cur_younger_gen
// dirty                                 ==> cur_youngergen_and_prev_nonclean_card
// precleaned                            ==> cur_youngergen_and_prev_nonclean_card
// prev-younger-gen                      ==> cur_youngergen_and_prev_nonclean_card
// cur-younger-gen                       ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
  volatile jbyte* entry = byte_for(field);
  do {
    jbyte entry_val = *entry;
    // We put this first because it's probably the most common case.
    if (entry_val == clean_card_val()) {
      // No threat of contention with cleaning threads.
      *entry = cur_youngergen_card_val();
      return;
    } else if (card_is_dirty_wrt_gen_iter(entry_val)
               || is_prev_youngergen_card_val(entry_val)) {
      // Mark it as both cur and prev youngergen; card cleaning thread will
      // eventually remove the previous stuff.
      jbyte new_val = cur_youngergen_and_prev_nonclean_card;
      jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
      // Did the CAS succeed?
      if (res == entry_val) return;
      // Otherwise, retry, to see the new value.
      continue;
    } else {
      assert(entry_val == cur_youngergen_and_prev_nonclean_card
             || entry_val == cur_youngergen_card_val(),
             "should be only possibilities.");
      return;
    }
  } while (true);
}

void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl,
                                                uint n_threads) {
  verify_used_region_at_save_marks(sp);

  const MemRegion urasm = sp->used_region_at_save_marks();
  non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
}

#ifdef ASSERT
void CardTableRS::verify_used_region_at_save_marks(Space* sp) const {
  MemRegion ur    = sp->used_region();
  MemRegion urasm = sp->used_region_at_save_marks();

  assert(ur.contains(urasm),
         "Did you forget to call save_marks()? "
         "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
         "[" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
}
#endif

void CardTableRS::clear_into_younger(Generation* old_gen) {
  assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
         "Should only be called for the old generation");
  // The card tables for the youngest gen need never be cleared.
  // There's a bit of subtlety in the clear() and invalidate()
  // methods that we exploit here and in invalidate_or_clear()
  // below to avoid missing cards at the fringes. If clear() or
  // invalidate() are changed in the future, this code should
  // be revisited. 20040107.ysr
  clear(old_gen->prev_used_region());
}

void CardTableRS::invalidate_or_clear(Generation* old_gen) {
  assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
         "Should only be called for the old generation");
  // Invalidate the cards for the currently occupied part of
  // the old generation and clear the cards for the
  // unoccupied part of the generation (if any, making use
  // of that generation's prev_used_region to determine that
  // region). No need to do anything for the youngest
  // generation. Also see note#20040107.ysr above.
  MemRegion used_mr = old_gen->used_region();
  MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
  if (!to_be_cleared_mr.is_empty()) {
    clear(to_be_cleared_mr);
  }
  invalidate(used_mr);
}


class VerifyCleanCardClosure: public OopClosure {
private:
  HeapWord* _boundary;
  HeapWord* _begin;
  HeapWord* _end;
protected:
  template <class T> void do_oop_work(T* p) {
    HeapWord* jp = (HeapWord*)p;
    assert(jp >= _begin && jp < _end,
           "Error: jp " PTR_FORMAT " should be within "
           "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")",
           p2i(jp), p2i(_begin), p2i(_end));
    oop obj = RawAccess<>::oop_load(p);
    guarantee(obj == NULL || (HeapWord*)obj >= _boundary,
              "pointer " PTR_FORMAT " at " PTR_FORMAT " on "
              "clean card crosses boundary " PTR_FORMAT,
              p2i(obj), p2i(jp), p2i(_boundary));
  }

public:
  VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) :
    _boundary(b), _begin(begin), _end(end) {
    assert(b <= begin,
           "Error: boundary " PTR_FORMAT " should be at or below begin " PTR_FORMAT,
           p2i(b), p2i(begin));
    assert(begin <= end,
           "Error: begin " PTR_FORMAT " should be at or below end " PTR_FORMAT,
           p2i(begin), p2i(end));
  }

  virtual void do_oop(oop* p)       { VerifyCleanCardClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
};

class VerifyCTSpaceClosure: public SpaceClosure {
private:
  CardTableRS* _ct;
  HeapWord* _boundary;
public:
  VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
    _ct(ct), _boundary(boundary) {}
  virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
};

class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
  CardTableRS* _ct;
public:
  VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
  void do_generation(Generation* gen) {
    // Skip the youngest generation.
    if (GenCollectedHeap::heap()->is_young_gen(gen)) {
      return;
    }
    // Normally, we're interested in pointers to younger generations.
    VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
    gen->space_iterate(&blk, true);
  }
};

void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();

  jbyte* cur_entry = byte_for(used.start());
  jbyte* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == clean_card_val()) {
      jbyte* first_dirty = cur_entry+1;
      while (first_dirty < limit &&
             *first_dirty == clean_card_val()) {
        first_dirty++;
      }
      // If the first object is a regular object, and it has a
      // young-to-old field, that would mark the previous card.
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
      if (boundary_block < boundary) {
        if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
          oop boundary_obj = oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != clean_card_val()) {
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      if (begin < end) {
        MemRegion mr(begin, end);
        VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
        for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) {
          if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
            oop(cur)->oop_iterate_no_header(&verify_blk, mr);
          }
        }
      }
      cur_entry = first_dirty;
    } else {
      // We'd normally expect that cur_youngergen_and_prev_nonclean_card
      // is a transient value, that cannot be in the card table
      // except during GC, and thus assert that:
      //   guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
      //             "Illegal CT value");
      // That however, need not hold, as will become clear in the
      // following...

      // We'd normally expect that if we are in the parallel case,
      // we can't have left a prev value (which would be different
      // from the current value) in the card table, and so we'd like to
      // assert that:
      //   guarantee(cur_youngergen_card_val() == youngergen_card
      //             || !is_prev_youngergen_card_val(*cur_entry),
      //             "Illegal CT value");
      // That, however, may not hold occasionally, because of
      // CMS or MSC in the old gen. To wit, consider the
      // following two simple illustrative scenarios:
      // (a) CMS: Consider the case where a large object L
      //     spanning several cards is allocated in the old
      //     gen, and has a young gen reference stored in it, dirtying
      //     some interior cards. A young collection scans the card,
      //     finds a young ref and installs a youngergenP_n value.
      //     L then goes dead. Now a CMS collection starts,
      //     finds L dead and sweeps it up. Assume that L is
      //     abutting _unallocated_blk, so _unallocated_blk is
      //     adjusted down to (below) L. Assume further that
      //     no young collection intervenes during this CMS cycle.
      //     The next young gen cycle will not get to look at this
      //     youngergenP_n card since it lies in the unoccupied
      //     part of the space.
      //     Some young collections later the blocks on this
      //     card can be re-allocated either due to direct allocation
      //     or due to absorbing promotions. At this time, the
      //     before-gc verification will fail the above assert.
      // (b) MSC: In this case, an object L with a young reference
      //     is on a card that (therefore) holds a youngergen_n value.
      //     Suppose also that L lies towards the end of the
      //     used space before GC. An MSC collection
      //     occurs that compacts to such an extent that this
      //     card is no longer in the occupied part of the space.
      //     Since current code in MSC does not always clear cards
      //     in the unused part of old gen, this stale youngergen_n
      //     value is left behind and can later be covered by
      //     an object when promotion or direct allocation
      //     re-allocates that part of the heap.
      //
      // Fortunately, the presence of such stale card values is
      // "only" a minor annoyance in that subsequent young collections
      // might needlessly scan such cards, but would still never corrupt
      // the heap as a result. However, it's likely not to be a significant
      // performance inhibitor in practice. For instance,
      // some recent measurements with unoccupied cards eagerly cleared
      // out to maintain this invariant, showed next to no
      // change in young collection times; of course one can construct
      // degenerate examples where the cost can be significant.
      // Note, in particular, that if the "stale" card is modified
      // after re-allocation, it would be dirty, not "stale". Thus,
      // we can never have a younger ref in such a card and it is
      // safe not to scan that card in any collection. [As we see
      // below, we do some unnecessary scanning
      // in some cases in the current parallel scanning algorithm.]
      //
      // The main point below is that the parallel card scanning code
      // deals correctly with these stale card values. There are two main
      // cases to consider where we have a stale "young gen" value and a
      // "derivative" case to consider, where we have a stale
      // "cur_younger_gen_and_prev_non_clean" value, as will become
      // apparent in the case analysis below.
      // o Case 1. If the stale value corresponds to a younger_gen_n
      //   value other than the cur_younger_gen value then the code
      //   treats this as being tantamount to a prev_younger_gen
      //   card. This means that the card may be unnecessarily scanned.
      //   There are two sub-cases to consider:
      //   o Case 1a. Let us say that the card is in the occupied part
      //     of the generation at the time the collection begins. In
      //     that case the card will be either cleared when it is scanned
      //     for young pointers, or will be set to cur_younger_gen as a
      //     result of promotion. (We have elided the normal case where
      //     the scanning thread and the promoting thread interleave
      //     possibly resulting in a transient
      //     cur_younger_gen_and_prev_non_clean value before settling
      //     to cur_younger_gen.) [End Case 1a.]
      //   o Case 1b. Consider now the case when the card is in the unoccupied
      //     part of the space which becomes occupied because of promotions
      //     into it during the current young GC. In this case the card
      //     will never be scanned for young references. The current
      //     code will set the card value to either
      //     cur_younger_gen_and_prev_non_clean or leave
      //     it with its stale value -- because the promotions didn't
      //     result in any younger refs on that card. Of these two
      //     cases, the latter will be covered in Case 1a during
      //     a subsequent scan. To deal with the former case, we need
      //     to further consider how we deal with a stale value of
      //     cur_younger_gen_and_prev_non_clean in our case analysis
      //     below. This we do in Case 3 below. [End Case 1b]
      //   [End Case 1]
      // o Case 2. If the stale value corresponds to cur_younger_gen being
      //   a value not necessarily written by a current promotion, the
      //   card will not be scanned by the younger refs scanning code.
      //   (This is OK since as we argued above such cards cannot contain
      //   any younger refs.) The result is that this value will be
      //   treated as a prev_younger_gen value in a subsequent collection,
      //   which is addressed in Case 1 above. [End Case 2]
      // o Case 3. We here consider the "derivative" case from Case 1b above
      //   because of which we may find a stale
      //   cur_younger_gen_and_prev_non_clean card value in the table.
      //   Once again, as in Case 1, we consider two subcases, depending
      //   on whether the card lies in the occupied or unoccupied part
      //   of the space at the start of the young collection.
      //   o Case 3a. Let us say the card is in the occupied part of
      //     the old gen at the start of the young collection. In that
      //     case, the card will be scanned by the younger refs scanning
      //     code which will set it to cur_younger_gen. In a subsequent
      //     scan, the card will be considered again and get its final
      //     correct value. [End Case 3a]
      //   o Case 3b. Now consider the case where the card is in the
      //     unoccupied part of the old gen, and is occupied as a result
      //     of promotions during this young GC. In that case,
      //     the card will not be scanned for younger refs. The presence
      //     of newly promoted objects on the card will then result in
      //     its keeping the value cur_younger_gen_and_prev_non_clean,
      //     which we have dealt with in Case 3 here. [End Case 3b]
      //   [End Case 3]
      //
      // (Please refer to the code in the helper class
      // ClearNonCleanCardWrapper and in CardTable for details.)
      //
      // The informal arguments above can be tightened into a formal
      // correctness proof and it behooves us to write up such a proof,
      // or to use model checking to prove that there are no lingering
      // concerns.
      //
      // Clearly because of Case 3b one cannot bound the time for
      // which a card will retain what we have called a "stale" value.
      // However, one can obtain a loose upper bound on the redundant
      // work as a result of such stale values. Note first that any
      // time a stale card lies in the occupied part of the space at
      // the start of the collection, it is scanned by younger refs
      // code and we can define a rank function on card values that
      // declines when this is so. Note also that when a card does not
      // lie in the occupied part of the space at the beginning of a
      // young collection, its rank can either decline or stay unchanged.
      // In this case, no extra work is done in terms of redundant
      // younger refs scanning of that card.
      // Then, the case analysis above reveals that, in the worst case,
      // any such stale card will be scanned unnecessarily at most twice.
      //
      // It is nonetheless advisable to try and get rid of some of this
      // redundant work in a subsequent (low priority) re-design of
      // the card-scanning code, if only to simplify the underlying
      // state machine analysis/proof. ysr 1/28/2002. XXX
      cur_entry++;
    }
  }
}

void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  GenCollectedHeap::heap()->generation_iterate(&blk, false);
  CardTable::verify();
}

CardTableRS::CardTableRS(MemRegion whole_heap, bool scanned_concurrently) :
  CardTable(whole_heap, scanned_concurrently),
  _cur_youngergen_card_val(youngergenP1_card),
  // LNC functionality
  _lowest_non_clean(NULL),
  _lowest_non_clean_chunk_size(NULL),
  _lowest_non_clean_base_chunk_index(NULL),
  _last_LNC_resizing_collection(NULL)
{
  // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
  // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
  uint max_gens = 2;
  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
                         mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  if (_last_cur_val_in_gen == NULL) {
    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
  }
  for (uint i = 0; i < max_gens + 1; i++) {
    _last_cur_val_in_gen[i] = clean_card_val();
  }
}

CardTableRS::~CardTableRS() {
  if (_last_cur_val_in_gen) {
    FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen);
    _last_cur_val_in_gen = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
    _last_LNC_resizing_collection = NULL;
  }
}

void CardTableRS::initialize() {
  CardTable::initialize();
  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (int i = 0; i < _max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }
}

bool CardTableRS::card_will_be_scanned(jbyte cv) {
  return card_is_dirty_wrt_gen_iter(cv) || is_prev_nonclean_card_val(cv);
}

bool CardTableRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (card_is_dirty_wrt_gen_iter(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}

void CardTableRS::non_clean_card_iterate_possibly_parallel(
  Space* sp,
  MemRegion mr,
  OopsInGenClosure* cl,
  CardTableRS* ct,
  uint n_threads)
{
  if (!mr.is_empty()) {
    if (n_threads > 0) {
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
    } else {
      // clear_cl finds contiguous dirty ranges of cards to process and clear.

      // This is the single-threaded version used by DefNew.
      const bool parallel = false;

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

      clear_cl.do_MemRegion(mr);
    }
  }
}

void CardTableRS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                                       OopsInGenClosure* cl, CardTableRS* ct,
                                                       uint n_threads) {
  fatal("Parallel gc not supported here.");
}

bool CardTableRS::is_in_young(oop obj) const {
  return GenCollectedHeap::heap()->is_in_young(obj);
}