/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

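// Returns the number of card table entries needed to cover covered_words
// of heap, including the one extra guard card.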
size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}

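// Returns the number of bytes to reserve for the byte map: one byte per card
// up to and including the guard card, rounded up to the commit granularity
// (the larger of the page size and the allocation granularity).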
size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
                                        "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL) {
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  }

  _cur_covered_regions = 0;
  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
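  //
  // For example, if card_size is 512 bytes (so card_shift is 9), the card
  // entry for a heap word at address p is the byte at byte_map_base + (p >> 9).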
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (int i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC);
    _last_LNC_resizing_collection = NULL;
  }
}

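// Returns the index of the covered region whose start is base, creating a
// new (empty) entry in sorted order if no such region exists yet.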
int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

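// Returns the index of the covered region containing addr; asserts (and
// returns -1) if no covered region contains it.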
int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

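// Returns the largest end address among the committed regions with an index
// below ind, or NULL if there are none.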
HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

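// Returns the part of mr that is committed only on behalf of region "self",
// i.e. mr minus every other region's committed space and the guard page.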
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

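// Grows or shrinks the committed part of the card table that backs the
// covered region starting at new_region.start() so that it covers new_region,
// committing or uncommitting card table pages as needed and cleaning the
// cards for any newly covered space.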
void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
            "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
        "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and records the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The committed memory's default value of 0 is not necessarily the
    // clean card value.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
      "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
      "Expect to be beyond new region unless impacting another region");
    // Do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

/*
   Claimed and deferred bits are used together in G1 during the evacuation
   pause. These bits can have the following state transitions:
   1. The claimed bit can be set over any other card state, except that
      the "dirty -> dirty and claimed" transition is checked for in
      G1 code and is not used.
   2. The deferred bit can be set only if the previous state of the card
      was either clean or claimed. mark_card_deferred() is wait-free.
      We do not care whether the operation succeeds, because if it does
      not, the only consequence is a duplicate entry in the update buffer
      due to the "cache miss". So it is not worth spinning.
 */

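// Atomically sets the claimed bit for the given card. Returns true if this
// thread performed the claim, false if the card was already claimed.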
bool CardTableModRefBS::claim_card(size_t card_index) {
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}

bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed.
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }

  if (val == g1_young_gen) {
    // The card is for a young gen region. We don't need to keep track of all
    // pointers into the young gen.
    return false;
  }

  // Cached bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

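// Applies cl to the non-clean cards of sp intersected with mr, clearing cards
// as they are processed. Uses the parallel iteration code when GC worker
// threads are active (n_par_threads() > 0) and the serial clearing wrapper
// otherwise.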
void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_strong_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call.  The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers().  n_par_threads can be set to 0 to
    // turn off parallelism.  For example when this code is called as
    // part of verification and SharedHeap::process_strong_roots() is being
    // used, then n_par_threads() may have been set to 0.  active_workers
    // is not overloaded with the meaning that it is a switch to disable
    // parallelism and so keeps the meaning of the number of
    // active gc workers.  If parallelism has not been shut off by
    // setting n_par_threads to 0, then n_par_threads should be
    // equal to active_workers.  When a different mechanism for shutting
    // off parallelism is used, then active_workers can be used in
    // place of n_par_threads.
    //  This is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism.
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    //  [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    //  [11] CompactingPermGenGen::younger_refs_iterate()
    //  [12] CardTableRS::younger_refs_iterate()
    //  [13] SharedHeap::process_strong_roots()
    //  [14] G1CollectedHeap::verify()
    //  [15] Universe::verify()
    //  [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel. The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
          (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

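// Marks every card covering a word of mr as dirty. mr must start and end on
// HeapWord boundaries.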
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

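// Returns the first (lowest-addressed) maximal range of dirty cards that
// intersects mr, optionally resetting those cards to reset_val first.
// Returns the empty region [mr.end(), mr.end()) if there are no dirty cards.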
MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
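// Checks that every card covering mr has value val (if val_equals) or a value
// different from val (if !val_equals); reports any offending cards and then
// guarantees that there were no failures.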
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool   failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]", start, end);
        tty->print_cr("==   %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("==   card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
                    "val: %d", curr, addr_for(curr),
                    (HeapWord*) (((size_t) addr_for(curr)) + card_size),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}

void CardTableModRefBS::verify_g1_young_region(MemRegion mr) {
  verify_region(mr, g1_young_gen, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               _byte_map, _byte_map + _byte_map_size, byte_map_base);
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
}

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}