/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified since the last
// enumeration.

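// Returns the number of card table entries needed to cover "covered_words"
// heap words, plus one for the guard card.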
size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}

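// Returns the size in bytes of the card table backing store (one byte per
// card, including the guard card), rounded up to the larger of the page
// size and the OS allocation granularity.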
size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
                                        "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL)
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  int i;
  for (i = 0; i < max_covered_regions; i++) {
    _covered[i].set_word_size(0);
    _committed[i].set_word_size(0);
  }
  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    // Do better than this for Merlin
    vm_exit_out_of_memory(_page_size, "card table last card");
  }
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

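// Returns the largest end() among the committed regions with index below
// "ind", or NULL if there are none.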
HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

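// Returns the portion of "mr" that is committed solely on behalf of region
// "self", i.e., with the parts committed for other regions and the guard
// page removed.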
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

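// Grows or shrinks the committed part of the card table backing the covered
// region that starts at new_region.start(): pages are committed or
// uncommitted as needed, and on expansion the newly covered card entries
// are set to clean.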
void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
           "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
            "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
        "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(),
                "card table expansion");
      }
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) <  _guard_index,
      "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region:
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
      "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only(*byte_for(_covered[ind].last());)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

/*
   Claimed and deferred bits are used together in G1 during the evacuation
   pause. These bits can have the following state transitions:
   1. The claimed bit can be put over any other card state. Except that
      the "dirty -> dirty and claimed" transition is checked for in
      G1 code and is not used.
   2. The deferred bit can be set only if the previous state of the card
      was either clean or claimed. mark_card_deferred() is wait-free.
      We do not care whether the operation succeeds, because a failure
      only results in a duplicate entry in the update buffer due to the
      "cache miss". So it's not worth spinning.
 */


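// Attempts to set the claimed bit on the card at card_index, retrying the
// CAS until either this thread installs the bit (returns true) or the card
// is observed to be already claimed (returns false).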
bool CardTableModRefBS::claim_card(size_t card_index) {
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}

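// If the card is clean or claimed, installs the deferred bit with a single
// CAS that is allowed to fail (see the state-transition comment above).
// Returns false only if the deferred bit was already set.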
bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }
  // Cached bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}


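// Dispatches iteration over the non-clean cards in "mr" either to the
// parallel worker version or to the serial version, depending on the
// number of parallel GC threads currently in use.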
void CardTableModRefBS::non_clean_card_iterate(Space* sp,
                                               MemRegion mr,
                                               DirtyCardToOopClosure* dcto_cl,
                                               MemRegionClosure* cl,
                                               bool clear) {
  if (!mr.is_empty()) {
    int n_threads = SharedHeap::heap()->n_par_threads();
    if (n_threads > 0) {
#ifndef SERIALGC
      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
#else  // SERIALGC
      fatal("Parallel gc not supported here.");
#endif // SERIALGC
    } else {
      non_clean_card_iterate_work(mr, cl, clear);
    }
  }
}

// NOTE: For this to work correctly, it is important that
// we look for non-clean cards below (so as to catch those
// marked precleaned), rather than look explicitly for dirty
// cards (and miss those marked precleaned). In that sense,
// the name precleaned is currently somewhat of a misnomer.
void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
                                                    MemRegionClosure* cl,
                                                    bool clear) {
  // Figure out whether we have to worry about parallelism.
  bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          if (clear) {
            for (size_t i = 0; i < non_clean_cards; i++) {
              // Clean the dirty cards (but leave the other non-clean
              // alone.)  If parallel, do the cleaning atomically.
              jbyte cur_entry_val = cur_entry[i];
              if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
                if (is_par) {
                  jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
                  assert(res != clean_card,
                         "Dirty card mysteriously cleaned");
                } else {
                  cur_entry[i] = clean_card;
                }
              }
            }
          }
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
                                                 OopClosure* cl,
                                                 bool clear,
                                                 bool before_save_marks) {
  // Note that dcto_cl is resource-allocated, so there is no
  // corresponding "delete".
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
  MemRegion used_mr;
  if (before_save_marks) {
    used_mr = sp->used_region_at_save_marks();
  } else {
    used_mr = sp->used_region();
  }
  non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
}

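// Marks as dirty every card spanning "mr"; "mr" must be HeapWord-aligned.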
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

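// Dirties every card spanning "mr" directly, without first intersecting
// "mr" with the covered regions (contrast with invalidate() above).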
void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// NOTES:
// (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
//     iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

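// Returns the first maximal range of contiguous dirty cards found in "mr"
// (intersected with the covered regions). If "reset" is true, the cards in
// that range are set to "reset_val". Returns the empty region
// [mr.end(), mr.end()) if no dirty card is found.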
MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

// Set all the dirty cards in the given region to "precleaned" state.
void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry++) {
        if (*cur_entry == dirty_card) {
          *cur_entry = precleaned_card;
        }
      }
    }
  }
}

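// Alignment constraint imposed by the card table: each page of the card
// table (os::vm_page_size() bytes, one byte per card) maps
// card_size * os::vm_page_size() bytes of heap, so boundaries aligned to
// this value correspond to whole card-table pages.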
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
class GuaranteeNotModClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
public:
  GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
  void do_MemRegion(MemRegion mr) {
    jbyte* entry = _ct->byte_for(mr.start());
    guarantee(*entry != CardTableModRefBS::clean_card,
              "Dirty card in region that should be clean");
  }
};

void CardTableModRefBS::verify_clean_region(MemRegion mr) {
  GuaranteeNotModClosure blk(this);
  non_clean_card_iterate_work(mr, &blk, false);
}

// To verify a MemRegion is entirely dirty this closure is passed to
// dirty_card_iterate. If the region is dirty do_MemRegion will be
// invoked only once with a MemRegion equal to the one being
// verified.
class GuaranteeDirtyClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
  MemRegion _mr;
  bool _result;
public:
  GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr)
    : _ct(ct), _mr(mr), _result(false) {}
  void do_MemRegion(MemRegion mr) {
    _result = _mr.equals(mr);
  }
  bool result() const { return _result; }
};

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  GuaranteeDirtyClosure blk(this, mr);
  dirty_card_iterate(mr, &blk);
  guarantee(blk.result(), "Non-dirty cards in region that should be dirty");
}
#endif

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
};

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
};