/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"

size_t CardTable::compute_byte_map_size() {
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
  _whole_heap(whole_heap),
  _scanned_concurrently(conc_scan),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  _byte_map_base(NULL)
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

CardTable::~CardTable() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}

void CardTable::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ?
    0 : MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to _byte_map_base, i.e.
  //
  //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  log_trace(gc, barrier)("CardTable::CardTable: ");
  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)("    _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
}

int CardTable::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTable::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTable::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTable::committed_unique_to_self(int self,
                                              MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTable::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
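  // Grow or shrink the page-aligned committed slice of the card table that
  // backs covered region "ind" so that it spans the cards for new_region,
  // committing or uncommitting whole pages as needed.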
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
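      // The commit range runs from the current committed end up to
      // new_end_for_commit, which was clamped above so that it never
      // overlaps the already-committed guard page.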
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTable::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
void CardTable::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTable::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTable::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTable::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
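// Each maximal run of consecutive dirty cards is passed to the closure as a
// single MemRegion covering the corresponding heap words.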
void CardTable::dirty_card_iterate(MemRegion mr,
                                   MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTable::dirty_card_range_after_reset(MemRegion mr,
                                                  bool reset,
                                                  int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTable::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTable::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTable::invalidate(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTable::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTable::verify_region(MemRegion mr,
                              jbyte val, bool val_equals) {
  jbyte* start = byte_for(mr.start());
  jbyte* end   = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ?
"" : "not ", val); 492 failures = true; 493 } 494 log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d", 495 p2i(curr), p2i(addr_for(curr)), 496 p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)), 497 (int) curr_val); 498 } 499 } 500 guarantee(!failures, "there should not have been any failures"); 501 } 502 503 void CardTable::verify_not_dirty_region(MemRegion mr) { 504 verify_region(mr, dirty_card, false /* val_equals */); 505 } 506 507 void CardTable::verify_dirty_region(MemRegion mr) { 508 verify_region(mr, dirty_card, true /* val_equals */); 509 } 510 #endif 511 512 void CardTable::print_on(outputStream* st) const { 513 st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] _byte_map_base: " INTPTR_FORMAT, 514 p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(_byte_map_base)); 515 }