/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

# include "incls/_precompiled.incl"
# include "incls/_cardTableModRefBS.cpp.incl"

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must not exceed 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL)
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  int i;
  for (i = 0; i < max_covered_regions; i++) {
    _covered[i].set_word_size(0);
    _committed[i].set_word_size(0);
  }
  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    // Do better than this for Merlin
    vm_exit_out_of_memory(_page_size, "card table last card");
  }
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

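// Returns the index of the covered region whose start address is "base",
// creating a new, empty entry at the proper (sorted) position if no such
// region exists yet.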
int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

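// Returns the index of the covered region containing "addr"; asserts and
// returns -1 if the address lies outside every covered region.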
int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

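// Returns the largest end address among the committed regions with an index
// below "ind", or NULL if there are none.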
HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

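// Returns the subset of "mr" that is committed solely on behalf of region
// "self": the committed parts of all other regions and the guard page are
// subtracted out, so only this portion may safely be uncommitted.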
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

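// Grows or shrinks the covered region starting at new_region.start() so that
// it ends at new_region.end(), committing or uncommitting card table pages as
// needed and cleaning the card entries for newly covered words.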
void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
            "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
        "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(),
                "card table expansion");
      }
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) <  _guard_index,
      "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the page-aligned expanded region actually used:
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
      "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only(*byte_for(_covered[ind].last());)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

/*
   Claimed and deferred bits are used together in G1 during the evacuation
   pause. These bits can have the following state transitions:
   1. The claimed bit can be set over any other card state, except that
      the "dirty -> dirty and claimed" transition is checked for in
      G1 code and is not used.
   2. The deferred bit can be set only if the previous state of the card
      was either clean or claimed. mark_card_deferred() is wait-free.
      We do not care whether the operation succeeds, because a failure
      only results in a duplicate entry in the update buffer due to the
      "cache miss". So it is not worth spinning.
 */

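// Attempts to set the claimed bit on the card at card_index using a CAS loop.
// Returns true if this thread performed the claim, false if the card was
// found to be already claimed.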
bool CardTableModRefBS::claim_card(size_t card_index) {
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}

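// Attempts to mark the card at card_index deferred. Returns false if the card
// was already deferred; otherwise returns true. The deferred bit is installed
// (with a single, unretried CAS) only when the card is clean or claimed.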
bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }
  // The deferred bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

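// Applies the closure to the non-clean cards intersecting "mr", dispatching
// to the parallel worker version when parallel GC threads are active and to
// the serial version otherwise.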
void CardTableModRefBS::non_clean_card_iterate(Space* sp,
                                               MemRegion mr,
                                               DirtyCardToOopClosure* dcto_cl,
                                               MemRegionClosure* cl,
                                               bool clear) {
  if (!mr.is_empty()) {
    int n_threads = SharedHeap::heap()->n_par_threads();
    if (n_threads > 0) {
#ifndef SERIALGC
      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
#else  // SERIALGC
      fatal("Parallel gc not supported here.");
#endif // SERIALGC
    } else {
      non_clean_card_iterate_work(mr, cl, clear);
    }
  }
}

// NOTE: For this to work correctly, it is important that
// we look for non-clean cards below (so as to catch those
// marked precleaned), rather than look explicitly for dirty
// cards (and miss those marked precleaned). In that sense,
// the name precleaned is currently somewhat of a misnomer.
void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
                                                    MemRegionClosure* cl,
                                                    bool clear) {
  // Figure out whether we have to worry about parallelism.
  bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          if (clear) {
            for (size_t i = 0; i < non_clean_cards; i++) {
              // Clean the dirty cards (but leave the other non-clean
              // cards alone).  If parallel, do the cleaning atomically.
              jbyte cur_entry_val = cur_entry[i];
              if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
                if (is_par) {
                  jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
                  assert(res != clean_card,
                         "Dirty card mysteriously cleaned");
                } else {
                  cur_entry[i] = clean_card;
                }
              }
            }
          }
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

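// Applies "cl" to the modified (non-clean) oops in "sp", using a
// DirtyCardToOopClosure over either the space's used region or its used
// region at the last save-marks, optionally clearing the cards as it goes.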
void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
                                                 OopClosure* cl,
                                                 bool clear,
                                                 bool before_save_marks) {
  // Note that dcto_cl is resource-allocated, so there is no
  // corresponding "delete".
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
  MemRegion used_mr;
  if (before_save_marks) {
    used_mr = sp->used_region_at_save_marks();
  } else {
    used_mr = sp->used_region();
  }
  non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
}

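// Marks as dirty every card spanned by the (heap-word-aligned) region "mr".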
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

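// Dirties the cards for the portions of "mr" that intersect the covered
// regions.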
void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// NOTES:
// (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
//     iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

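// Returns the first maximal run of dirty cards within "mr", optionally
// resetting those card entries to "reset_val"; returns an empty region at
// mr.end() if no dirty card is found.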
MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

// Set all the dirty cards in the given region to "precleaned" state.
void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry++) {
        if (*cur_entry == dirty_card) {
          *cur_entry = precleaned_card;
        }
      }
    }
  }
}

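// The maximum alignment the card table can impose on the heap: the amount of
// heap covered by a single page of the card table.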
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
class GuaranteeNotModClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
public:
  GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
  void do_MemRegion(MemRegion mr) {
    jbyte* entry = _ct->byte_for(mr.start());
    guarantee(*entry != CardTableModRefBS::clean_card,
              "Dirty card in region that should be clean");
  }
};

void CardTableModRefBS::verify_clean_region(MemRegion mr) {
  GuaranteeNotModClosure blk(this);
  non_clean_card_iterate_work(mr, &blk, false);
}

// To verify a MemRegion is entirely dirty this closure is passed to
// dirty_card_iterate. If the region is dirty do_MemRegion will be
// invoked only once with a MemRegion equal to the one being
// verified.
class GuaranteeDirtyClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
  MemRegion _mr;
  bool _result;
public:
  GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr)
    : _ct(ct), _mr(mr), _result(false) {}
  void do_MemRegion(MemRegion mr) {
    _result = _mr.equals(mr);
  }
  bool result() const { return _result; }
};

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  GuaranteeDirtyClosure blk(this, mr);
  dirty_card_iterate(mr, &blk);
  guarantee(blk.result(), "Non-dirty cards in region that should be dirty");
}
#endif

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
}

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}