/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"

size_t CardTable::compute_byte_map_size() {
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
  _scanned_concurrently(conc_scan),
  _whole_heap(whole_heap),
  _guard_index(0),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _byte_map(NULL),
  _byte_map_base(NULL),
  _cur_covered_regions(0),
  _covered(NULL),
  _committed(NULL),
  _guard_region()
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
}

CardTable::~CardTable() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}

void CardTable::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
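  // Reserve the byte map. If the card table uses the default page size, no
  // extra alignment is requested (rs_align == 0); otherwise the reservation
  // is aligned to the larger of the card table page size and the allocation
  // granularity.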
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to _byte_map_base, i.e.
  //
  //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (CardValue*) heap_rs.base();
  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  CardValue* guard_card = &_byte_map[_guard_index];
  HeapWord* guard_page = align_down((HeapWord*)guard_card, _page_size);
  _guard_region = MemRegion(guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  log_trace(gc, barrier)("CardTable::CardTable: ");
  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)("    _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
}

int CardTable::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  CardValue* ct_start = byte_for(base);
  HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size);
  _committed[res].set_start(ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTable::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTable::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTable::committed_unique_to_self(int self, MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

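// Grow or shrink the committed part of the card table that backs the covered
// region starting at new_region.start(). Only the end of a covered region
// ever moves; pages are committed or uncommitted as the mapped card range
// changes.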
void CardTable::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    HeapWord* new_end = (HeapWord*) byte_after(new_region.last());
    HeapWord* new_end_aligned = align_up(new_end, _page_size);
    assert(new_end_aligned >= new_end, "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

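    // The committed end can now move in one of three ways: grow past the
    // current end (commit new pages), shrink below it (uncommit any pages
    // no other covered region still uses), or stay where it is.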
    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

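    // Clean the card range exposed by the expansion.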
    // The default of 0 is not necessarily clean cards.
    CardValue* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // CardValue* const end = byte_after(new_region.last());
    CardValue* const end = (CardValue*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // Do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(CardValue)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTable::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((CardValue*) _committed[ind].start())),
                         p2i(addr_for((CardValue*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
void CardTable::dirty_MemRegion(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  CardValue* cur  = byte_for(mr.start());
  CardValue* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTable::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  CardValue* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  CardValue* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(CardValue)));
}

void CardTable::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

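// Dirty every card spanning mr. Unlike invalidate(), the range is not
// intersected with the covered regions, so the caller is expected to pass a
// range that the table actually covers.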
void CardTable::dirty(MemRegion mr) {
  CardValue* first = byte_for(mr.start());
  CardValue* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTable::dirty_card_iterate(MemRegion mr, MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      CardValue *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTable::dirty_card_range_after_reset(MemRegion mr,
                                                  bool reset,
                                                  int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      CardValue* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTable::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTable::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTable::invalidate(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTable::verify() {
  verify_guard();
}

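// Non-product verification: walk the cards spanning mr and check each one
// against an expected value.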
"" : "not ", val); 482 failures = true; 483 } 484 log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d", 485 p2i(curr), p2i(addr_for(curr)), 486 p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)), 487 (int) curr_val); 488 } 489 } 490 guarantee(!failures, "there should not have been any failures"); 491 } 492 493 void CardTable::verify_not_dirty_region(MemRegion mr) { 494 verify_region(mr, dirty_card, false /* val_equals */); 495 } 496 497 void CardTable::verify_dirty_region(MemRegion mr) { 498 verify_region(mr, dirty_card, true /* val_equals */); 499 } 500 #endif 501 502 void CardTable::print_on(outputStream* st) const { 503 st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] _byte_map_base: " INTPTR_FORMAT, 504 p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(_byte_map_base)); 505 }