/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

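// The table needs one byte per card of covered heap, plus one guard card
// used to detect errors.  For example, assuming the default 512-byte cards
// and 8-byte HeapWords (card_size_in_words == 64), covering a 1 GiB heap
// (2^27 words) requires 2^21 card entries plus the guard card.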
size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}

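// The byte map size is one byte per card (including the guard card), rounded
// up to the larger of the page size and the VM allocation granularity so
// that the map can be reserved and committed in whole pages.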
size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
                                        "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL)
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  int i;
  for (i = 0; i < max_covered_regions; i++) {
    _covered[i].set_word_size(0);
    _committed[i].set_word_size(0);
  }
  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
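  //
  // Illustrative example (assuming the default card_shift of 9, i.e.
  // 512-byte cards): the card entry for a heap address "addr" is
  //
  //   byte_map_base + (uintptr_t(addr) >> card_shift)
  //     == &_byte_map[(addr - low_bound) >> card_shift]
  //
  // since low_bound is card aligned; each aligned 512-byte span of the
  // heap thus maps to one byte of _byte_map.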
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    // Do better than this for Merlin
    vm_exit_out_of_memory(_page_size, "card table last card");
  }
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

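// Returns the portion of "mr" that is committed solely on behalf of covered
// region "self": memory also committed for other covered regions, and the
// guard page, are excluded so the result can be uncommitted safely.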
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
           "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
            "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
        "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(),
                "card table expansion");
      }
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) <  _guard_index,
      "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
      "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

/*
   Claimed and deferred bits are used together in G1 during the evacuation
   pause. These bits can have the following state transitions:
   1. The claimed bit can be put over any other card state, except that
      the "dirty -> dirty and claimed" transition is checked for in
      G1 code and is not used.
   2. The deferred bit can be set only if the previous state of the card
      was either clean or claimed. mark_card_deferred() is wait-free.
      We do not care whether the operation succeeds, because a failure
      only results in a duplicate entry in the update buffer due to the
      "cache miss". So it is not worth spinning.
 */


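// Atomically set the claimed bit for the given card using a CAS loop.
// Returns true if this thread installed the claimed bit, false if the card
// was (or concurrently became) claimed by another thread.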
bool CardTableModRefBS::claim_card(size_t card_index) {
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}

bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }
  // The deferred bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // The caller (process_strong_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads, all active
    // GC threads execute this call.  The number of active GC threads
    // needs to be passed to par_non_clean_card_iterate_work() to get
    // proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead of
    // workers()->active_workers():  n_par_threads can be set to 0 to
    // turn off parallelism.  For example, when this code is called as
    // part of verification via SharedHeap::process_strong_roots(),
    // n_par_threads() may have been set to 0.  active_workers is not
    // overloaded as a switch to disable parallelism; it always means
    // the number of active GC workers.  If parallelism has not been
    // shut off by setting n_par_threads to 0, then n_par_threads
    // should equal active_workers.  When a different mechanism for
    // shutting off parallelism is used, active_workers can be used in
    // place of n_par_threads.
    //
    // This is an example of a path where n_par_threads is set to 0
    // to turn off parallelism:
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    //  [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    //  [11] CompactingPermGenGen::younger_refs_iterate()
    //  [12] CardTableRS::younger_refs_iterate()
    //  [13] SharedHeap::process_strong_roots()
    //  [14] G1CollectedHeap::verify()
    //  [15] Universe::verify()
    //  [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#ifndef SERIALGC
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // SERIALGC
      fatal("Parallel gc not supported here.");
#endif // SERIALGC
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel. The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
          (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

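// Mark as dirty every card table entry for the cards spanning
// [mr.start(), mr.end()); "mr" must be HeapWord aligned.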
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

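// Returns the first maximal range of dirty cards found within "mr"
// (restricted to the covered regions), optionally resetting those card
// entries to "reset_val".  If no dirty card is found, returns the empty
// region MemRegion(mr.end(), mr.end()).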
MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

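// Each card-table byte covers card_size bytes of heap, so one OS page of
// card-table bytes corresponds to card_size * vm_page_size() bytes of heap;
// that product is the coarsest alignment the card table can require of
// covered-region boundaries.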
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool   failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]",
                      start, end);
        tty->print_cr("==   %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("==   card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
                    "val: %d", curr, addr_for(curr),
                    (HeapWord*) (((size_t) addr_for(curr)) + card_size),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               _byte_map, _byte_map + _byte_map_size, byte_map_base);
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
}

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}