#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)cardTableModRefBS.cpp        1.60 07/12/05 23:34:34 JVM"
#endif
/*
 * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified since the last
// enumeration.

# include "incls/_precompiled.incl"
# include "incls/_cardTableModRefBS.cpp.incl"

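// Returns the number of card table entries needed to cover 'covered_words'
// heap words, including one extra guard card used to detect errors.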
size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}

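// Computes the size of the byte map, rounded up to the larger of the card
// table page size and the OS allocation granularity.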
size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
                                        "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

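// The constructor reserves space for the byte map covering the whole heap and
// commits only the page holding the guard card; pages backing the covered
// regions are committed lazily by resize_covered_region().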
CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL)
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  int i;
  for (i = 0; i < max_covered_regions; i++) {
    _covered[i].set_word_size(0);
    _committed[i].set_word_size(0);
  }
  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
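  // byte_map_base is a biased base: byte_for(p) computes
  // byte_map_base + (uintptr_t(p) >> card_shift), so unless the heap starts
  // at address 0, byte_map_base points below _byte_map and must never be
  // dereferenced directly.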
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    // Do better than this for Merlin
    vm_exit_out_of_memory(_page_size, "card table last card");
  }
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

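// Returns the index of the covered region whose base is 'base'.  If no such
// region exists yet, a new empty entry is inserted at the position that keeps
// _covered sorted by start address, and its index is returned.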
int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

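// Returns the index of the covered region containing 'addr', or -1 if no
// covered region contains it (which should not happen for heap addresses).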
int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

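// Returns the highest committed end() among the covered regions with index
// less than 'ind', or NULL if there are none.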
HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

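// Returns the portion of 'mr' that is committed on behalf of region 'self'
// only, i.e. with the committed ranges of all other regions and the guard
// page subtracted.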
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

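// Resizes the covered region that starts at new_region.start(), committing or
// uncommitting whole card-table pages as needed and resetting any newly
// covered cards to clean.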
void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
           "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  int ind = find_covering_region_by_base(new_region.start());
  MemRegion old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*)align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // The guard page is always committed and should not be committed over.
    HeapWord* new_end_for_commit = MIN2(new_end_aligned, _guard_region.start());
    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(),
                "card table expansion");
      }
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        if (!os::uncommit_memory((char*)uncommit_region.start(),
                                 uncommit_region.byte_size())) {
          // Do better than this for Merlin
          vm_exit_out_of_memory(uncommit_region.byte_size(),
            "card table contraction");
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < (int) _guard_index,
      "The guard card will be overwritten");
    jbyte* end = byte_after(new_region.last());
    // Do nothing if we resized downward.
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(oop* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}


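// Applies the closures to the non-clean cards in the intersection of 'mr'
// with the covered regions, dispatching to the parallel version when the
// collection is running with multiple GC threads.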
void CardTableModRefBS::non_clean_card_iterate(Space* sp,
                                               MemRegion mr,
                                               DirtyCardToOopClosure* dcto_cl,
                                               MemRegionClosure* cl,
                                               bool clear) {
  if (!mr.is_empty()) {
    int n_threads = SharedHeap::heap()->n_par_threads();
    if (n_threads > 0) {
#ifndef SERIALGC
      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
#else  // SERIALGC
      fatal("Parallel gc not supported here.");
#endif // SERIALGC
    } else {
      non_clean_card_iterate_work(mr, cl, clear);
    }
  }
}

// NOTE: For this to work correctly, it is important that
// we look for non-clean cards below (so as to catch those
// marked precleaned), rather than look explicitly for dirty
// cards (and miss those marked precleaned). In that sense,
// the name precleaned is currently somewhat of a misnomer.
void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
                                                    MemRegionClosure* cl,
                                                    bool clear) {
  // Figure out whether we have to worry about parallelism.
  bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          if (clear) {
            for (size_t i = 0; i < non_clean_cards; i++) {
              // Clean the dirty cards (but leave the other non-clean
              // cards alone.)  If parallel, do the cleaning atomically.
              jbyte cur_entry_val = cur_entry[i];
              if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
                if (is_par) {
                  jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
                  assert(res != clean_card,
                         "Dirty card mysteriously cleaned");
                } else {
                  cur_entry[i] = clean_card;
                }
              }
            }
          }
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

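// Applies 'cl' to the modified (non-clean) portions of the used region of
// 'sp', using a DirtyCardToOopClosure obtained from the space to map dirty
// card ranges back onto the objects that span them.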
void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
                                                 OopClosure* cl,
                                                 bool clear,
                                                 bool before_save_marks) {
  // Note that dcto_cl is resource-allocated, so there is no
  // corresponding "delete".
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
  MemRegion used_mr;
  if (before_save_marks) {
    used_mr = sp->used_region_at_save_marks();
  } else {
    used_mr = sp->used_region();
  }
  non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
}

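// Marks every card spanning 'mr' as dirty.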
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

// NOTES:
// (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
//     iterates over dirty card ranges in increasing address order.
// (2) Unlike, e.g., dirty_card_range_after_preclean() below,
//     this method does not mark the dirty cards as precleaned.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

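// Returns the first maximal range of dirty cards in 'mr', in increasing
// address order, after marking those cards precleaned.  Returns an empty
// region at mr.end() if 'mr' contains no dirty cards.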
MemRegion CardTableModRefBS::dirty_card_range_after_preclean(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          for (size_t i = 0; i < dirty_cards; i++) {
             cur_entry[i] = precleaned_card;
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

// Set all the dirty cards in the given region to "precleaned" state.
void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry++) {
        if (*cur_entry == dirty_card) {
          *cur_entry = precleaned_card;
        }
      }
    }
  }
}

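// The maximum alignment the card table can require of covered space
// boundaries: one OS page of card table entries covers
// card_size * os::vm_page_size() bytes of heap.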
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
class GuaranteeNotModClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
public:
  GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
  void do_MemRegion(MemRegion mr) {
    jbyte* entry = _ct->byte_for(mr.start());
    guarantee(*entry != CardTableModRefBS::clean_card,
              "Dirty card in region that should be clean");
  }
};

void CardTableModRefBS::verify_clean_region(MemRegion mr) {
  GuaranteeNotModClosure blk(this);
  non_clean_card_iterate_work(mr, &blk, false);
}
#endif

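// The ForCTRS variants additionally recognize the CardTableRS's
// generation-specific card values as non-clean.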
bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
}

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}