/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL)
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be less than 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}
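// How the table maps to the heap (a summary of the scheme used below): the
// heap is divided into cards of card_size bytes (card_size == 1 << card_shift),
// and each card is represented by one jbyte in _byte_map.  byte_map_base is
// biased so that, for any covered heap address p,
//
//   byte_for(p) == byte_map_base + (uintptr_t(p) >> card_shift)
//
// which matches the shift-and-add that the compiled store-check emits (see
// the comment in initialize() below).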
void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr(" "
                           " &_byte_map[0]: " INTPTR_FORMAT
                           " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                           p2i(&_byte_map[0]),
                           p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr(" "
                           " byte_map_base: " INTPTR_FORMAT,
                           p2i(byte_map_base));
  }
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}
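// Region bookkeeping: _covered[i] describes a heap region (typically one per
// generation) that this table covers, kept sorted by start address;
// _committed[i] describes the card-table pages actually committed for that
// region.  Committed regions may overlap at page boundaries, which
// resize_covered_region() below has to account for.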
int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
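    // (Newly committed pages read as zero, and zero is not guaranteed to be
    // the clean_card value, so the entries for the newly exposed cards are
    // explicitly set to clean_card by the memset below.)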
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // This line commented out cleans the newly expanded region and
    // not the aligned up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr(" "
                           " _covered[%d].start(): " INTPTR_FORMAT
                           " _covered[%d].last(): " INTPTR_FORMAT,
                           ind, p2i(_covered[ind].start()),
                           ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr(" "
                           " _committed[%d].start(): " INTPTR_FORMAT
                           " _committed[%d].last(): " INTPTR_FORMAT,
                           ind, p2i(_committed[ind].start()),
                           ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr(" "
                           " byte_for(start): " INTPTR_FORMAT
                           " byte_for(last): " INTPTR_FORMAT,
                           p2i(byte_for(_covered[ind].start())),
                           p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr(" "
                           " addr_for(start): " INTPTR_FORMAT
                           " addr_for(last): " INTPTR_FORMAT,
                           p2i(addr_for((jbyte*) _committed[ind].start())),
                           p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
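// write_ref_field_work simply defers to the inline fast path in
// cardTableModRefBS.inline.hpp, which (roughly) dirties the card covering the
// updated field:
//
//   volatile jbyte* byte = byte_for(field);
//   if (release)  OrderAccess::release_store(byte, dirty_card);
//   else          *byte = dirty_card;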
void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}


void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        tty->print_cr("== %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], "
                    "val: %d", p2i(curr), p2i(addr_for(curr)),
                    p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}