/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/thread.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

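// Returns the number of bytes to reserve for the card table byte map:
// one byte per card in the whole heap plus the trailing guard card,
// rounded up to the larger of the card table page size and the VM
// allocation granularity. For example, with the usual 512-byte cards a
// 1 GB heap needs 2 MB of byte map (plus the guard card) before rounding.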
size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}

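// The constructor only records the heap extent and allocates the covered
// region bookkeeping; the byte map itself is reserved and committed later,
// in initialize().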
CardTableModRefBS::CardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL),
  _defer_initial_card_mark(false)
{
  assert((uintptr_t(_whole_heap.start())  & (card_size - 1))  == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1))  == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must not exceed 512"); // why?

  _covered   = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

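// Reserves the byte map, establishes byte_map_base so that
// byte_for(p) == byte_map_base + (uintptr_t(p) >> card_shift), and commits
// the page holding the guard card. Pages backing cards for the covered
// regions are committed lazily by resize_covered_region().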
void CardTableModRefBS::initialize() {
  initialize_deferred_card_mark_barriers();
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;
  log_trace(gc, barrier)("CardTableModRefBS::initialize: ");
  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}

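// Returns the index of the covered region that starts at "base", creating a
// new (empty) entry in address-sorted order if no such region exists yet.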
int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

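// Returns the index of the covered region containing "addr"; it is an error
// (asserted in debug builds) for "addr" to lie outside every covered region.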
int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

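// Returns the highest end() among the committed regions with index < ind,
// or NULL if there are none.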
HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

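// Returns the portion of "mr" that is committed solely on behalf of region
// "self", i.e. with the committed ranges of all other regions and the guard
// page subtracted. Only such memory may safely be uncommitted when "self"
// shrinks.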
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

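// Grows or shrinks the covered region with the same start as "new_region",
// committing (or uncommitting) card table pages as needed and cleaning the
// cards for any newly covered portion of the heap. The start of a covered
// region never changes, only its end.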
void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned = (HeapWord*) align_up(new_end, _page_size);
    assert((void*)new_end_aligned >= (void*) new_end, "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // Freshly committed memory reads as 0, and 0 is not necessarily the
    // clean card value, so explicitly clean the cards covering the newly
    // exposed part of the region.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) <  _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly covered
    // region, not the page-aligned-up committed region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())),  p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((jbyte*) _committed[ind].start())),  p2i(addr_for((jbyte*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

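// Dirties every card intersecting "mr", including partial cards at either end.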
void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

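// Returns the first maximal run of dirty cards within "mr" (as a heap
// MemRegion), optionally resetting those card entries to "reset_val".
// If no dirty card is found, an empty region at mr.end() is returned.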
MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

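// The largest alignment the card table can impose on heap boundaries: one
// committed page of card entries maps card_size * vm_page_size() bytes of heap.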
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
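// Checks that every card spanning "mr" has (val_equals == true) or does not
// have (val_equals == false) the value "val", logging each offending card.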
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
        failures = true;
      }
      log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
                            p2i(curr), p2i(addr_for(curr)),
                            p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                            (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
void CardTableModRefBS::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
  if (!ReduceInitialCardMarks) {
    return;
  }
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_card_mark_barrier(thread);
  if (new_obj->is_typeArray() || is_in_young(new_obj)) {
    // Arrays of non-references don't need a post-barrier.
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      write_region(mr);
    }
  }
}

void CardTableModRefBS::initialize_deferred_card_mark_barriers() {
  // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
  // otherwise remains unused.
#if defined(COMPILER2) || INCLUDE_JVMCI
  _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

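// If "thread" has a deferred card mark pending (see
// on_slowpath_allocation_exit above), apply it now and clear the per-thread
// deferred region.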
void CardTableModRefBS::flush_deferred_card_mark_barrier(JavaThread* thread) {
#if defined(COMPILER2) || INCLUDE_JVMCI
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(!is_in_young(old_obj),
             "Else should have been filtered in on_slowpath_allocation_exit()");
      assert(oopDesc::is_oop(old_obj, true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
#else
  assert(!_defer_initial_card_mark, "Should be false");
  assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
}

void CardTableModRefBS::flush_deferred_barriers(JavaThread* thread) {
  // The deferred store barriers must all have been flushed to the
  // card-table (or other remembered set structure) before GC starts
  // processing the card-table (or other remembered set).
  flush_deferred_card_mark_barrier(thread);
}