/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
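  // One byte per card, including the trailing guard card (_guard_index + 1
  // bytes in total), rounded up so the map can be reserved and committed in
  // whole pages.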
  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL)
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end())   & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must not exceed 512");

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

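  // If the byte map uses the default OS page size, ReservedSpace needs no
  // special alignment; otherwise align the reservation to the larger of the
  // page size and the allocation granularity.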
  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
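  // byte_map_base is positioned so that byte_for(p) reduces to
  // byte_map_base + (uintptr_t(p) >> card_shift); it may point outside the
  // allocated map when the heap does not start at address 0. For example
  // (illustrative numbers only): with card_shift == 9 (512-byte cards) and
  // low_bound == 0x1000, a store to 0x1200 marks
  // byte_map_base + (0x1200 >> 9) == &_byte_map[1].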
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound - 1) <= &_byte_map[_last_valid_index], "Checking end of map");

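  // Commit the page containing the guard card and mark the card itself;
  // verify_guard() later checks that this byte is never overwritten.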
  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: ");
  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}

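// Returns the index of the covered region whose start is "base". If no such
// region exists yet, a new empty entry is created for it, shifting later
// entries up to keep the arrays sorted by start address.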
int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

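// Returns the largest end() among the committed regions with index < ind,
// or NULL if ind is 0.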
HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

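// Returns the portion of "mr" that is committed solely on behalf of region
// "self": parts shared with other regions' committed memory and the guard
// page are excluded, so the result can be safely uncommitted.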
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

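// Grows or shrinks the card table backing for a covered region whose end has
// moved, committing or uncommitting byte-map pages as needed. Only the end of
// a region ever changes; its start is fixed.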
void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned = (HeapWord*) align_up(new_end, _page_size);
    assert((void*)new_end_aligned >= (void*)new_end, "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // Newly committed memory is zero-filled, and 0 is not necessarily the
    // clean_card value, so the new cards must be cleaned explicitly.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the page-aligned region that was actually committed.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // Do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}


// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
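    // byte_after(mr.start() - 1) is the first card that begins at or after
    // mr.start(), i.e. the first card wholly contained in mr.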
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last - first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards * card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

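// Returns the first maximal range of dirty cards within mr, as a heap
// MemRegion, optionally resetting those card entries to reset_val. If no
// dirty card is found, returns an empty region at mr.end().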
MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards * card_size_in_words);
          if (reset) {
            for (size_t j = 0; j < dirty_cards; j++) {
              cur_entry[j] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

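// The largest heap alignment the card table can require: aligning the heap
// to card_size * vm_page_size() bytes makes the corresponding stretch of the
// byte map start and end on page boundaries.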
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start = byte_for(mr.start());
  jbyte* end   = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
        failures = true;
      }
      log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
                            p2i(curr), p2i(addr_for(curr)),
                            p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                            (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}