/*
 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_cardTableRS.cpp.incl"

CardTableRS::CardTableRS(MemRegion whole_heap,
                         int max_covered_regions) :
  GenRemSet(),
  _cur_youngergen_card_val(youngergenP1_card),
  _regions_to_iterate(max_covered_regions - 1)
{
#ifndef SERIALGC
  if (UseG1GC) {
    _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap,
                                                max_covered_regions);
  } else {
    _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
  }
#else
  _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
#endif
  set_bs(_ct_bs);
  _last_cur_val_in_gen = new jbyte[GenCollectedHeap::max_gens + 1];
  if (_last_cur_val_in_gen == NULL) {
    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
  }
  for (int i = 0; i < GenCollectedHeap::max_gens + 1; i++) {
    _last_cur_val_in_gen[i] = clean_card_val();
  }
  _ct_bs->set_CTRS(this);
}

void CardTableRS::resize_covered_region(MemRegion new_region) {
  _ct_bs->resize_covered_region(new_region);
}

jbyte CardTableRS::find_unused_youngergenP_card_value() {
  for (jbyte v = youngergenP1_card;
       v < cur_youngergen_and_prev_nonclean_card;
       v++) {
    bool seen = false;
    for (int g = 0; g < _regions_to_iterate; g++) {
      if (_last_cur_val_in_gen[g] == v) {
        seen = true;
        break;
      }
    }
    if (!seen) return v;
  }
  ShouldNotReachHere();
  return 0;
}

void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
  // Parallel or sequential, we must always set the prev to equal the
  // last one written.
  if (parallel) {
    // Find a parallel value to be used next.
    jbyte next_val = find_unused_youngergenP_card_value();
    set_cur_youngergen_card_val(next_val);
  } else {
    // In a sequential traversal we will always write youngergen, so that
    // the inline barrier is correct.
    set_cur_youngergen_card_val(youngergen_card);
  }
}

void CardTableRS::younger_refs_iterate(Generation* g,
                                       OopsInGenClosure* blk) {
  _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
  g->younger_refs_iterate(blk);
}

class ClearNoncleanCardWrapper: public MemRegionClosure {
  MemRegionClosure* _dirty_card_closure;
  CardTableRS* _ct;
  bool _is_par;
private:
  // Clears the given card, returning true if the corresponding card should be
  // processed.
  bool clear_card(jbyte* entry) {
    if (_is_par) {
      while (true) {
        // In the parallel case, we may have to do this several times.
        jbyte entry_val = *entry;
        assert(entry_val != CardTableRS::clean_card_val(),
               "We shouldn't be looking at clean cards, and this should "
               "be the only place they get cleaned.");
        if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
            || _ct->is_prev_youngergen_card_val(entry_val)) {
          jbyte res =
            Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
          if (res == entry_val) {
            break;
          } else {
            assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
                   "The CAS above should only fail if another thread did "
                   "a GC write barrier.");
          }
        } else if (entry_val ==
                   CardTableRS::cur_youngergen_and_prev_nonclean_card) {
          // Parallelism shouldn't matter in this case. Only the thread
          // assigned to scan the card should change this value.
          *entry = _ct->cur_youngergen_card_val();
          break;
        } else {
          assert(entry_val == _ct->cur_youngergen_card_val(),
                 "Should be the only possibility.");
          // In this case, the card was clean before, and became
          // cur_youngergen only because of processing of a promoted object.
          // We don't have to look at the card.
          return false;
        }
      }
      return true;
    } else {
      jbyte entry_val = *entry;
      assert(entry_val != CardTableRS::clean_card_val(),
             "We shouldn't be looking at clean cards, and this should "
             "be the only place they get cleaned.");
      assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
             "This should be impossible in the sequential case.");
      *entry = CardTableRS::clean_card_val();
      return true;
    }
  }

public:
  ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure,
                           CardTableRS* ct) :
    _dirty_card_closure(dirty_card_closure), _ct(ct) {
    _is_par = (SharedHeap::heap()->n_par_threads() > 0);
  }
  void do_MemRegion(MemRegion mr) {
    // We start at the high end of "mr", walking backwards
    // while accumulating a contiguous dirty range of cards in
    // [start_of_non_clean, end_of_non_clean) which we then
    // process en masse.
    HeapWord* end_of_non_clean = mr.end();
    HeapWord* start_of_non_clean = end_of_non_clean;
    jbyte* entry = _ct->byte_for(mr.last());
    const jbyte* first_entry = _ct->byte_for(mr.start());
    while (entry >= first_entry) {
      HeapWord* cur = _ct->addr_for(entry);
      if (!clear_card(entry)) {
        // We hit a clean card; process any non-empty
        // dirty range accumulated so far.
        if (start_of_non_clean < end_of_non_clean) {
          MemRegion mr2(start_of_non_clean, end_of_non_clean);
          _dirty_card_closure->do_MemRegion(mr2);
        }
        // Reset the dirty window while continuing to
        // look for the next dirty window to process.
        end_of_non_clean = cur;
        start_of_non_clean = end_of_non_clean;
      }
      // Open the left end of the window one card to the left.
      start_of_non_clean = cur;
      // Note that "entry" leads "start_of_non_clean" in
      // its leftward excursion after this point
      // in the loop and, when we hit the left end of "mr",
      // will point off of the left end of the card-table
      // for "mr".
      entry--;
    }
    // If the first card of "mr" was dirty, we will have
    // been left with a dirty window, co-initial with "mr",
    // which we now process.
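    // (For example, if every card of "mr" is non-clean, no window is
    // emitted inside the loop above; start_of_non_clean is left at the
    // first card of "mr" and the single window covering all of "mr"'s
    // cards is processed here.)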
    if (start_of_non_clean < end_of_non_clean) {
      MemRegion mr2(start_of_non_clean, end_of_non_clean);
      _dirty_card_closure->do_MemRegion(mr2);
    }
  }
};
// clean (by dirty->clean before) ==> cur_younger_gen
// dirty ==> cur_youngergen_and_prev_nonclean_card
// precleaned ==> cur_youngergen_and_prev_nonclean_card
// prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card
// cur-younger-gen ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
  jbyte* entry = ct_bs()->byte_for(field);
  do {
    jbyte entry_val = *entry;
    // We put this first because it's probably the most common case.
    if (entry_val == clean_card_val()) {
      // No threat of contention with cleaning threads.
      *entry = cur_youngergen_card_val();
      return;
    } else if (card_is_dirty_wrt_gen_iter(entry_val)
               || is_prev_youngergen_card_val(entry_val)) {
      // Mark it as both cur and prev youngergen; card cleaning thread will
      // eventually remove the previous stuff.
      jbyte new_val = cur_youngergen_and_prev_nonclean_card;
      jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
      // Did the CAS succeed?
      if (res == entry_val) return;
      // Otherwise, retry, to see the new value.
      continue;
    } else {
      assert(entry_val == cur_youngergen_and_prev_nonclean_card
             || entry_val == cur_youngergen_card_val(),
             "should be the only possibilities.");
      return;
    }
  } while (true);
}

void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl) {
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, _ct_bs->precision(),
                                                   cl->gen_boundary());
  ClearNoncleanCardWrapper clear_cl(dcto_cl, this);

  _ct_bs->non_clean_card_iterate(sp, sp->used_region_at_save_marks(),
                                 dcto_cl, &clear_cl, false);
}

void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Generations younger than gen have been evacuated. We can clear
  // card table entries for gen (we know that it has no pointers
  // to younger gens) and for those below. The card tables for
  // the youngest gen need never be cleared, and those for perm gen
  // will be cleared based on the parameter clear_perm.
  // There's a bit of subtlety in the clear() and invalidate()
  // methods that we exploit here and in invalidate_or_clear()
  // below to avoid missing cards at the fringes. If clear() or
  // invalidate() are changed in the future, this code should
  // be revisited. 20040107.ysr
  Generation* g = gen;
  for (Generation* prev_gen = gch->prev_gen(g);
       prev_gen != NULL;
       g = prev_gen, prev_gen = gch->prev_gen(g)) {
    MemRegion to_be_cleared_mr = g->prev_used_region();
    clear(to_be_cleared_mr);
  }
  // Clear perm gen cards if asked to do so.
  if (clear_perm) {
    MemRegion to_be_cleared_mr = gch->perm_gen()->prev_used_region();
    clear(to_be_cleared_mr);
  }
}

void CardTableRS::invalidate_or_clear(Generation* gen, bool younger,
                                      bool perm) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // For each generation gen (and younger and/or perm)
  // invalidate the cards for the currently occupied part
  // of that generation and clear the cards for the
  // unoccupied part of the generation (if any, making use
  // of that generation's prev_used_region to determine that
  // region).
  // No need to do anything for the youngest
  // generation. Also see note#20040107.ysr above.
  Generation* g = gen;
  for (Generation* prev_gen = gch->prev_gen(g); prev_gen != NULL;
       g = prev_gen, prev_gen = gch->prev_gen(g)) {
    MemRegion used_mr = g->used_region();
    MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
    if (!to_be_cleared_mr.is_empty()) {
      clear(to_be_cleared_mr);
    }
    invalidate(used_mr);
    if (!younger) break;
  }
  // Clear perm gen cards if asked to do so.
  if (perm) {
    g = gch->perm_gen();
    MemRegion used_mr = g->used_region();
    MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
    if (!to_be_cleared_mr.is_empty()) {
      clear(to_be_cleared_mr);
    }
    invalidate(used_mr);
  }
}


class VerifyCleanCardClosure: public OopClosure {
private:
  HeapWord* _boundary;
  HeapWord* _begin;
  HeapWord* _end;
protected:
  template <class T> void do_oop_work(T* p) {
    HeapWord* jp = (HeapWord*)p;
    if (jp >= _begin && jp < _end) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      guarantee(obj == NULL ||
                (HeapWord*)p < _boundary ||
                (HeapWord*)obj >= _boundary,
                "pointer on clean card crosses boundary");
    }
  }
public:
  VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) :
    _boundary(b), _begin(begin), _end(end) {}
  virtual void do_oop(oop* p)       { VerifyCleanCardClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
};

class VerifyCTSpaceClosure: public SpaceClosure {
private:
  CardTableRS* _ct;
  HeapWord* _boundary;
public:
  VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
    _ct(ct), _boundary(boundary) {}
  virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
};

class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
  CardTableRS* _ct;
public:
  VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
  void do_generation(Generation* gen) {
    // Skip the youngest generation.
    if (gen->level() == 0) return;
    // Normally, we're interested in pointers to younger generations.
    VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
    gen->space_iterate(&blk, true);
  }
};

void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();

  jbyte* cur_entry = byte_for(used.start());
  jbyte* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == CardTableModRefBS::clean_card) {
      jbyte* first_dirty = cur_entry+1;
      while (first_dirty < limit &&
             *first_dirty == CardTableModRefBS::clean_card) {
        first_dirty++;
      }
      // If the first object is a regular object, and it has a
      // young-to-old field, that would mark the previous card.
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
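      // A block that straddles the left edge of this clean run starts on
      // an earlier card; if that earlier card is non-clean, any young refs
      // in the straddling object are already accounted for there, so we
      // may begin verification after that object.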
      if (boundary_block < boundary) {
        if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
          oop boundary_obj = oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      HeapWord* cur = start_block;
      VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
      while (cur < end) {
        if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
          oop(cur)->oop_iterate(&verify_blk);
        }
        cur += s->block_size(cur);
      }
      cur_entry = first_dirty;
    } else {
      // We'd normally expect that cur_youngergen_and_prev_nonclean_card
      // is a transient value, that cannot be in the card table
      // except during GC, and thus assert that:
      // guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
      //           "Illegal CT value");
      // That, however, need not hold, as will become clear in the
      // following...

      // We'd normally expect that if we are in the parallel case,
      // we can't have left a prev value (which would be different
      // from the current value) in the card table, and so we'd like to
      // assert that:
      // guarantee(cur_youngergen_card_val() == youngergen_card
      //           || !is_prev_youngergen_card_val(*cur_entry),
      //           "Illegal CT value");
      // That, however, may not hold occasionally, because of
      // CMS or MSC in the old gen. To wit, consider the
      // following two simple illustrative scenarios:
      // (a) CMS: Consider the case where a large object L
      //     spanning several cards is allocated in the old
      //     gen, and has a young gen reference stored in it, dirtying
      //     some interior cards. A young collection scans the card,
      //     finds a young ref and installs a youngergenP_n value.
      //     L then goes dead. Now a CMS collection starts,
      //     finds L dead and sweeps it up. Assume that L is
      //     abutting _unallocated_blk, so _unallocated_blk is
      //     adjusted down to (below) L. Assume further that
      //     no young collection intervenes during this CMS cycle.
      //     The next young gen cycle will not get to look at this
      //     youngergenP_n card since it lies in the unoccupied
      //     part of the space.
      //     Some young collections later the blocks on this
      //     card can be re-allocated either due to direct allocation
      //     or due to absorbing promotions. At this time, the
      //     before-gc verification will fail the above assert.
      // (b) MSC: In this case, an object L with a young reference
      //     is on a card that (therefore) holds a youngergen_n value.
      //     Suppose also that L lies towards the end of the used
      //     space before GC. An MSC collection
      //     occurs that compacts to such an extent that this
      //     card is no longer in the occupied part of the space.
      //     Since current code in MSC does not always clear cards
      //     in the unused part of old gen, this stale youngergen_n
      //     value is left behind and can later be covered by
      //     an object when promotion or direct allocation
      //     re-allocates that part of the heap.
      //
      // Fortunately, the presence of such stale card values is
      // "only" a minor annoyance in that subsequent young collections
      // might needlessly scan such cards, but would still never corrupt
      // the heap as a result. Moreover, it's likely not to be a significant
      // performance inhibitor in practice. (For instance,
      // some recent measurements with unoccupied cards eagerly cleared
      // out to maintain this invariant, showed next to no
      // change in young collection times; of course one can construct
      // degenerate examples where the cost can be significant.)
      // Note, in particular, that if the "stale" card is modified
      // after re-allocation, it would be dirty, not "stale". Thus,
      // we can never have a younger ref in such a card and it is
      // safe not to scan that card in any collection. [As we see
      // below, we do some unnecessary scanning
      // in some cases in the current parallel scanning algorithm.]
      //
      // The main point below is that the parallel card scanning code
      // deals correctly with these stale card values. There are two main
      // cases to consider where we have a stale "younger gen" value, and a
      // "derivative" case where we have a stale
      // "cur_younger_gen_and_prev_non_clean" value, as will become
      // apparent in the case analysis below.
      // o Case 1. If the stale value corresponds to a younger_gen_n
      //   value other than the cur_younger_gen value then the code
      //   treats this as being tantamount to a prev_younger_gen
      //   card. This means that the card may be unnecessarily scanned.
      //   There are two sub-cases to consider:
      //   o Case 1a. Let us say that the card is in the occupied part
      //     of the generation at the time the collection begins. In
      //     that case the card will be either cleared when it is scanned
      //     for young pointers, or will be set to cur_younger_gen as a
      //     result of promotion. (We have elided the normal case where
      //     the scanning thread and the promoting thread interleave,
      //     possibly resulting in a transient
      //     cur_younger_gen_and_prev_non_clean value before settling
      //     to cur_younger_gen.) [End Case 1a]
      //   o Case 1b. Consider now the case when the card is in the unoccupied
      //     part of the space which becomes occupied because of promotions
      //     into it during the current young GC. In this case the card
      //     will never be scanned for young references. The current
      //     code will set the card value to either
      //     cur_younger_gen_and_prev_non_clean or leave
      //     it with its stale value -- because the promotions didn't
      //     result in any younger refs on that card. Of these two
      //     cases, the latter will be covered in Case 1a during
      //     a subsequent scan. To deal with the former case, we need
      //     to further consider how we deal with a stale value of
      //     cur_younger_gen_and_prev_non_clean in our case analysis
      //     below. This we do in Case 3 below. [End Case 1b]
      //   [End Case 1]
      // o Case 2. If the stale value corresponds to cur_younger_gen being
      //   a value not necessarily written by a current promotion, the
      //   card will not be scanned by the younger refs scanning code.
      //   (This is OK since as we argued above such cards cannot contain
      //   any younger refs.) The result is that this value will be
      //   treated as a prev_younger_gen value in a subsequent collection,
      //   which is addressed in Case 1 above. [End Case 2]
      // o Case 3. We here consider the "derivative" case from Case 1b above,
      //   because of which we may find a stale
      //   cur_younger_gen_and_prev_non_clean card value in the table.
      //   Once again, as in Case 1, we consider two subcases, depending
      //   on whether the card lies in the occupied or unoccupied part
      //   of the space at the start of the young collection.
      //   o Case 3a. Let us say the card is in the occupied part of
      //     the old gen at the start of the young collection. In that
      //     case, the card will be scanned by the younger refs scanning
      //     code which will set it to cur_younger_gen. In a subsequent
      //     scan, the card will be considered again and get its final
      //     correct value. [End Case 3a]
      //   o Case 3b. Now consider the case where the card is in the
      //     unoccupied part of the old gen, and is occupied as a result
      //     of promotions during this young gc. In that case,
      //     the card will not be scanned for younger refs. The presence
      //     of newly promoted objects on the card will then result in
      //     its keeping the value cur_younger_gen_and_prev_non_clean,
      //     which we have dealt with here in Case 3. [End Case 3b]
      //   [End Case 3]
      //
      // (Please refer to the code in the helper class
      // ClearNoncleanCardWrapper and in CardTableModRefBS for details.)
      //
      // The informal arguments above can be tightened into a formal
      // correctness proof and it behooves us to write up such a proof,
      // or to use model checking to prove that there are no lingering
      // concerns.
      //
      // Clearly, because of Case 3b, one cannot bound the time for
      // which a card will retain what we have called a "stale" value.
      // However, one can obtain a loose upper bound on the redundant
      // work as a result of such stale values. Note first that any
      // time a stale card lies in the occupied part of the space at
      // the start of the collection, it is scanned by younger refs
      // code and we can define a rank function on card values that
      // declines when this is so. Note also that when a card does not
      // lie in the occupied part of the space at the beginning of a
      // young collection, its rank can either decline or stay unchanged.
      // In this case, no extra work is done in terms of redundant
      // younger refs scanning of that card.
      // Then, the case analysis above reveals that, in the worst case,
      // any such stale card will be scanned unnecessarily at most twice.
      //
      // It is nonetheless advisable to try and get rid of some of this
      // redundant work in a subsequent (low priority) re-design of
      // the card-scanning code, if only to simplify the underlying
      // state machine analysis/proof. ysr 1/28/2002. XXX
      cur_entry++;
    }
  }
}

void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  CollectedHeap* ch = Universe::heap();
  // We will do the perm-gen portion of the card table, too.
  Generation* pg = SharedHeap::heap()->perm_gen();
  HeapWord* pg_boundary = pg->reserved().start();

  if (ch->kind() == CollectedHeap::GenCollectedHeap) {
    GenCollectedHeap::heap()->generation_iterate(&blk, false);
    _ct_bs->verify();

    // If the old gen collections also collect perm, then we are only
    // interested in perm-to-young pointers, not perm-to-old pointers.
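    // In that case, lower the verification boundary from the start of
    // perm to the start of the old gen, so that perm-to-old pointers
    // on clean cards are not reported as errors.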
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CollectorPolicy* cp = gch->collector_policy();
    if (cp->is_mark_sweep_policy() || cp->is_concurrent_mark_sweep_policy()) {
      pg_boundary = gch->get_gen(1)->reserved().start();
    }
  }
  VerifyCTSpaceClosure perm_space_blk(this, pg_boundary);
  SharedHeap::heap()->perm_gen()->space_iterate(&perm_space_blk, true);
}


void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
  if (!mr.is_empty()) {
    jbyte* cur_entry = byte_for(mr.start());
    jbyte* limit = byte_after(mr.last());
    // The region mr may not start on a card boundary so
    // the first card may reflect a write to the space
    // just prior to mr.
    if (!is_aligned(mr.start())) {
      cur_entry++;
    }
    for (; cur_entry < limit; cur_entry++) {
      guarantee(*cur_entry == CardTableModRefBS::clean_card,
                "Unexpected dirty card found");
    }
  }
}