/*
 * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"

size_t CardTable::compute_byte_map_size() {
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}
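
// Worked example of the sizing above (a sketch; the values assume 512-byte
// cards and 4 KiB pages, neither of which is guaranteed here): a 1 GiB heap
// needs 2^30 / 2^9 = 2 MiB of card bytes plus one guard card, and align_up()
// then rounds _guard_index + 1 to the larger of the page size and the
// allocation granularity.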

CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
  _scanned_concurrently(conc_scan),
  _whole_heap(whole_heap),
  _guard_index(0),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _byte_map(NULL),
  _byte_map_base(NULL),
  _cur_covered_regions(0),
  _covered(MemRegion::create_array(_max_covered_regions, mtGC)),
  _committed(MemRegion::create_array(_max_covered_regions, mtGC)),
  _guard_region()
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must not exceed 512");
}

CardTable::~CardTable() {
  MemRegion::destroy_array(_covered, _max_covered_regions);
  MemRegion::destroy_array(_committed, _max_covered_regions);
}

void CardTable::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to _byte_map_base, i.e.
  //
  //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (CardValue*) heap_rs.base();
  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
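
  // A sketch of the biased-base arithmetic (addresses are illustrative,
  // assuming card_shift == 9, i.e. 512-byte cards): with
  // low_bound == 0x100000000 and _byte_map == 0x20000000,
  //   _byte_map_base = 0x20000000 - (0x100000000 >> 9) = 0x1f800000,
  // so byte_for(low_bound) = 0x1f800000 + (0x100000000 >> 9) = _byte_map,
  // which is exactly what the first assert below checks.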
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  CardValue* guard_card = &_byte_map[_guard_index];
  HeapWord* guard_page = align_down((HeapWord*)guard_card, _page_size);
  _guard_region = MemRegion(guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  log_trace(gc, barrier)("CardTable::CardTable: ");
  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)("    _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
}

int CardTable::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  CardValue* ct_start = byte_for(base);
  HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size);
  _committed[res].set_start(ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}
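
// Illustration of find_covering_region_by_base (hypothetical addresses):
// if _covered holds regions starting at { 0x1000, 0x3000 }, a call with
// base == 0x2000 breaks out of the first loop at i == 1, shifts the 0x3000
// entry (and its _committed twin) up one slot, and installs a new
// zero-length region at index 1, keeping the array sorted by start address.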

int CardTable::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTable::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTable::committed_unique_to_self(int self, MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}
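
// Example for committed_unique_to_self (a sketch; the bounds below are
// hypothetical): if mr is [0x8000, 0xa000) and a sibling's _committed
// region is [0x9000, 0xb000), minus() trims the result to [0x8000, 0x9000),
// so a caller ends up uncommitting only card-table memory that no other
// covered region (and never the guard page) still needs.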

void CardTable::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    HeapWord* new_end = (HeapWord*) byte_after(new_region.last());
    HeapWord* new_end_aligned = align_up(new_end, _page_size);
    assert(new_end_aligned >= new_end, "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        if (!os::uncommit_memory((char*)uncommit_region.start(),
                                 uncommit_region.byte_size())) {
          assert(false, "Card table contraction failed");
          // The call failed so don't change the end of the
          // committed region.  This is better than taking the
          // VM down.
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    CardValue* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // CardValue* const end = byte_after(new_region.last());
    CardValue* const end = (CardValue*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // Do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(CardValue)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTable::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((CardValue*) _committed[ind].start())), p2i(addr_for((CardValue*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
void CardTable::dirty_MemRegion(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  CardValue* cur  = byte_for(mr.start());
  CardValue* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}
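
// Example for dirty_MemRegion (hypothetical addresses, assuming 512-byte
// cards): for mr == [0x1000, 0x1500), byte_for(mr.start()) is the byte for
// card 8 and byte_after(mr.last()) is one past the byte for card 10, so the
// loop dirties exactly cards 8..10, including the card that mr only
// partially covers at its end.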

void CardTable::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  CardValue* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  CardValue* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(CardValue)));
}
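
// Why cur starts at byte_after(mr.start() - 1) (a sketch, with illustrative
// numbers and 512-byte cards): if mr.start() == 0x1280, i.e. mid-card,
// cleaning that card could erase a dirty mark owed to the part of the card
// that lies before mr. Stepping one word back and taking byte_after()
// selects the first card that begins at or after mr.start(), so only cards
// whose start lies inside mr are cleaned at the front.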

void CardTable::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTable::dirty(MemRegion mr) {
  CardValue* first = byte_for(mr.start());
  CardValue* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTable::dirty_card_iterate(MemRegion mr, MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      CardValue *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate the maximal dirty card range starting at cur_entry.
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards * card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}
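
// Usage sketch for dirty_card_iterate (the closure below is hypothetical,
// not part of HotSpot):
//
//   class CountDirtyClosure : public MemRegionClosure {
//     size_t _words;
//    public:
//     CountDirtyClosure() : _words(0) {}
//     void do_MemRegion(MemRegion mr) { _words += mr.word_size(); }
//     size_t words() const { return _words; }
//   };
//
//   CountDirtyClosure cl;
//   card_table->dirty_card_iterate(region, &cl);
//
// Each do_MemRegion() call receives one maximal run of consecutive dirty
// cards as a card-aligned MemRegion, in increasing address order.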

MemRegion CardTable::dirty_card_range_after_reset(MemRegion mr,
                                                  bool reset,
                                                  int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      CardValue *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate the maximal dirty card range starting at cur_entry.
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards * card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTable::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}
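
// Worked example for ct_max_alignment_constraint (typical values, not
// guaranteed on every platform): with card_size == 512 and a 4 KiB page,
// the constraint is 512 * 4096 = 2 MiB; one committed page of card bytes
// then covers a whole 2 MiB stretch of heap, so heap boundaries aligned
// this way never split a card-table page.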

void CardTable::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTable::invalidate(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTable::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTable::verify_region(MemRegion mr, CardValue val, bool val_equals) {
  CardValue* start    = byte_for(mr.start());
  CardValue* end      = byte_for(mr.last());
  bool failures = false;
  for (CardValue* curr = start; curr <= end; ++curr) {
    CardValue curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
        failures = true;
      }
      log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
                            p2i(curr), p2i(addr_for(curr)),
                            p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                            (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTable::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTable::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTable::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] _byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(_byte_map_base));
}