/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/virtualspace.hpp"
#include "logging/log.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL)
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be less than 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}
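// Set up the card table backing store: reserve _byte_map_size bytes, compute
// byte_map_base so that byte_for(p) == byte_map_base + (uintptr_t(p) >> card_shift)
// for any covered heap address p, and commit the page holding the guard card.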
void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: ");
  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}
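// Returns the largest end() among the committed regions with index below "ind",
// or NULL if there are none. resize_covered_region() uses this to extend a
// committed region over the tails of lower committed regions.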
HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}
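// Grow or shrink the committed part of the card table backing a covered region.
// Only the end of a covered region ever moves: newly exposed card bytes are set
// to clean_card, and card table pages that no longer back any covered region may
// be uncommitted (unless UseAdaptiveGCBoundary is set).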
void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned = (HeapWord*) align_up(new_end, _page_size);
    assert((void*)new_end_aligned >= (void*) new_end, "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving. A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region. This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}
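// Dirty every card covering a word of "mr". Callers such as invalidate() below
// first intersect "mr" with a covered region, so the card bytes written here lie
// in committed card table memory.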
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}
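// Returns the first maximal range of dirty cards within "mr", optionally
// resetting those cards to "reset_val". If no dirty card is found, an empty
// MemRegion positioned at mr.end() is returned.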
MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
        failures = true;
      }
      log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
                            p2i(curr), p2i(addr_for(curr)),
                            p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                            (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}