/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).
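//
// A rough sketch of the mapping (assuming the default 512-byte cards,
// i.e. card_shift == 9): each card-sized chunk of the heap maps to one
// byte of the table, so the write barrier for a store to a field at
// address addr dirties
//
//   byte_map_base[(uintptr_t)addr >> card_shift]
//
// For example, a 1G heap then needs 2^30 / 2^9 = 2M card bytes, plus a
// guard card, rounded up to page/allocation granularity by
// compute_byte_map_size() below.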

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL)
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be less than or equal to 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
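  //
  // A concrete (illustrative) example with 512-byte cards, i.e.
  // card_shift == 9: if low_bound == 0x10000000, then
  // byte_map_base == _byte_map - 0x80000, and a store to address
  // 0x10000200 dirties byte_map_base[0x10000200 >> 9], which is
  // &_byte_map[1] -- the second card of the covered range.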
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;
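  // The guard card stays committed and keeps the value last_card;
  // verify_guard() checks it (even in product builds) to catch stray
  // writes past the last valid card table entry.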

  log_trace(gc, barrier)("CardTableModRefBS::initialize: ");
  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

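// Returns the part of "mr" that is committed only on behalf of covered
// region "self": the committed space of every other region (and the
// guard page) is subtracted, so the result can be uncommitted without
// disturbing anyone else.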
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

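// Grow or shrink the covered region that starts at new_region.start()
// so that it becomes new_region: commit or uncommit card table pages
// as needed, and clean the card entries for any newly covered words.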
void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // Freshly committed pages are zero-filled, and 0 is dirty_card,
    // not clean_card, so the new card entries must be cleaned explicitly.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // Using new_end_for_commit (rather than the commented-out line
    // below) cleans the aligned-up expanded region, not just the newly
    // expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
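// (A precise barrier dirties the card containing the updated field
// itself; an imprecise barrier may dirty only the card containing the
// start of the modified object, leaving the scan to locate the field.)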

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}


void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
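//
// A minimal usage sketch (illustrative only; any MemRegionClosure
// subclass works):
//
//   class CountDirtyWords : public MemRegionClosure {
//    public:
//     size_t _words;
//     CountDirtyWords() : _words(0) {}
//     void do_MemRegion(MemRegion mr) { _words += mr.word_size(); }
//   };
//
// After ct->dirty_card_iterate(mr, &cl), cl._words holds the total
// word size of the dirty ranges visited.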
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t j = 0; j < dirty_cards; j++) {
              cur_entry[j] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

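// The largest alignment the card table can impose on a heap boundary:
// for example (illustrative values), with 512-byte cards and a 4K page
// this is 512 * 4096 = 2M.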
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
        failures = true;
      }
      log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
                            p2i(curr), p2i(addr_for(curr)),
                            p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                            (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}