/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).
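// Each card of card_size bytes in the covered heap is summarized by one
// byte in _byte_map: the write barrier dirties the byte for the card that
// contains a modified ref field, and the collector later scans non-clean
// cards to find those fields.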

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
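  // Example (illustrative only; assuming the default 512-byte cards on a
  // 64-bit VM, so card_size_in_words == 64): covering 1000 heap words
  // rounds up to 1024 words, and 1024 / 64 + 1 == 17 card bytes are
  // required -- 16 for the covered words plus the guard card.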
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
                                        "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL)
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  int i;
  for (i = 0; i < max_covered_regions; i++) {
    _covered[i].set_word_size(0);
    _committed[i].set_word_size(0);
  }
  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
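  // Illustrative arithmetic (assumed example values, not actual addresses):
  // with card_shift == 9, low_bound == 0x10000 and _byte_map == 0x8000,
  // byte_map_base == 0x8000 - (0x10000 >> 9) == 0x7f80, so
  // byte_for(low_bound) == 0x7f80 + 0x80 == 0x8000 == &_byte_map[0],
  // which is exactly what the asserts above check.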

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    // Do better than this for Merlin
    vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card");
  }

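  // Mark the guard card with a distinguished value; verify_guard() checks
  // that it has not been overwritten.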
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
           "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
            "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
        "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
                "card table expansion");
      }
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default value of newly committed memory is 0, which is not
    // necessarily the clean card value, so explicitly clean the added
    // cards below.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) <  _guard_index,
      "The guard card will be overwritten");
    // This line commented out cleans the newly expanded region and
    // not the aligned up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
      "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only(*byte_for(_covered[ind].last());)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

/*
   Claimed and deferred bits are used together in G1 during the evacuation
   pause. These bits can have the following state transitions:
   1. The claimed bit can be put over any other card state. Except that
      the "dirty -> dirty and claimed" transition is checked for in
      G1 code and is not used.
   2. The deferred bit can be set only if the previous state of the card
      was either clean or claimed. mark_card_deferred() is wait-free.
      We do not care whether the operation succeeds, because a failure
      only results in a duplicate entry in the update buffer due to the
      "cache miss". So it is not worth spinning.
 */
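
// An informal sketch of the transitions described above (not an exhaustive
// state machine):
//   clean   -> claimed              (claim_card)
//   clean   -> deferred             (mark_card_deferred)
//   claimed -> claimed | deferred   (mark_card_deferred)
// claim_card may also OR the claimed bit into any other non-dirty state.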


bool CardTableModRefBS::claim_card(size_t card_index) {
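  // Try to set the claimed bit using a CAS retry loop.  Returns true if this
  // thread installed the claimed bit, false if another thread had already
  // claimed the card.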
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}

bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
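  // Try once (no retry) to set the deferred bit.  Returns false if the card
  // was already deferred, true otherwise; as noted above, losing the race at
  // worst produces a duplicate entry in the update buffer.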
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }
  // The deferred bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_strong_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call.  The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers().  n_par_threads can be set to 0 to
    // turn off parallelism.  For example, when this code is called as
    // part of verification and SharedHeap::process_strong_roots() is being
    // used, n_par_threads() may have been set to 0.  active_workers
    // is not overloaded to also act as a switch that disables parallelism;
    // it always means the number of active GC workers.  If parallelism
    // has not been shut off by setting n_par_threads to 0, then
    // n_par_threads should be equal to active_workers.  When a different
    // mechanism for shutting off parallelism is used, active_workers can
    // be used in place of n_par_threads.
    // The following is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism:
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    //  [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    //  [11] CompactingPermGenGen::younger_refs_iterate()
    //  [12] CardTableRS::younger_refs_iterate()
    //  [13] SharedHeap::process_strong_roots()
    //  [14] G1CollectedHeap::verify()
    //  [15] Universe::verify()
    //  [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads =  SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel. The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
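// Iteration proceeds over each covered intersection from its highest card
// down to its lowest (decreasing address order), handing maximal runs of
// consecutive non-clean cards to the closure.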
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
          (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

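// Returns the first maximal range of dirty cards found within mr (restricted
// to the covered regions), optionally resetting those card entries to
// reset_val.  If no dirty card is found, returns the empty region at mr.end().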
MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool   failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]", start, end);
        tty->print_cr("==   %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("==   card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
                    "val: %d", curr, addr_for(curr),
                    (HeapWord*) (((size_t) addr_for(curr)) + card_size),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               _byte_map, _byte_map + _byte_map_size, byte_map_base);
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
};

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
};