/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL),
  // LNC functionality
  _lowest_non_clean(NULL),
  _lowest_non_clean_chunk_size(NULL),
  _lowest_non_clean_base_chunk_index(NULL),
  _last_LNC_resizing_collection(NULL)
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?
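  // (With card_shift == 9, card_size is exactly 512: each byte of the table
  //  covers one 512-byte-aligned, 512-byte chunk of the heap, so a heap
  //  address p maps to table index (uintptr_t)p >> card_shift.)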

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL) {
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  }
  for (int i = 0; i < _max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
    _last_LNC_resizing_collection = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j]   = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
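    // (E.g., assuming 4K pages, a card-table end at ...0x1a40 rounds up to
    //  ...0x2000; committing whole pages is why neighboring committed
    //  regions may overlap at page boundaries.)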
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
      // Use new_end_aligned (as opposed to new_end_for_commit) because
      // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
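            // (Keeping the stale, larger committed end only over-commits
            //  card-table memory; no card marks are lost.)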
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region:
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_covered[ind].start()),
                  ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_committed[ind].start()),
                  ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  p2i(byte_for(_covered[ind].start())),
                  p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  p2i(addr_for((jbyte*) _committed[ind].start())),
                  p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
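//
// ("Precise" means the card for the updated field itself is dirtied; an
// imprecise barrier may dirty only the card for the head of the object
// containing the field.  As a sketch (not the exact code the compilers
// emit), the inline store check is equivalent to:
//
//   byte_map_base[(uintptr_t)field >> card_shift] = dirty_card;
//
// which is why byte_map_base need not itself point into the byte map.)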

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}


void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_roots()) claims that all GC threads execute this
    // call.  With UseDynamicNumberOfGCThreads, all active GC threads
    // execute it.  The number of active GC threads must be passed to
    // par_non_clean_card_iterate_work() to get proper partitioning and
    // termination.
    //
    // This is an example of where n_par_threads() is used instead of
    // workers()->active_workers():  n_par_threads can be set to 0 to turn
    // off parallelism, e.g. when this code is invoked for verification
    // through SharedHeap::process_roots().  active_workers, by contrast,
    // is never overloaded as an off switch; it always means the number of
    // active GC workers.  Unless parallelism has been shut off by setting
    // n_par_threads to 0, n_par_threads should equal active_workers; if a
    // different mechanism for shutting off parallelism were used,
    // active_workers could be used in place of n_par_threads.
    int n_threads = GenCollectedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(GenCollectedHeap::heap()->n_par_threads() ==
             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // clear_cl finds contiguous dirty ranges of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
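  // (A card straddling mr.start() also covers words outside mr; cleaning it
  //  could drop a mark for a not-yet-scanned reference, so it is left alone.)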
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool   failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ?
                  (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        tty->print_cr("==   %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], "
                    "val: %d", p2i(curr), p2i(addr_for(curr)),
                    p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
}

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}