/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap, BarrierSet::Name kind) :
  ModRefBarrierSet(kind),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL),
  // LNC functionality
  _lowest_non_clean(NULL),
  _lowest_non_clean_chunk_size(NULL),
  _lowest_non_clean_base_chunk_index(NULL),
  _last_LNC_resizing_collection(NULL)
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?
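  // Illustrative example, assuming the default card_size of 512 bytes
  // (card_shift == 9): each aligned 512-byte block of heap is described
  // by one card-table byte, and an address maps to its card index by an
  // unsigned shift:
  //
  //   0x20000000 >> 9 == 0x100000
  //   0x200001ff >> 9 == 0x100000   // same card
  //   0x20000200 >> 9 == 0x100001   // next card
  //
  // Two stores into the same 512-byte block therefore dirty the same
  // card byte.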

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (int i = 0; i < _max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}
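
// Illustrative example of the biasing above, assuming card_shift == 9 and
// a heap starting at low_bound == 0x20000000:
//
//   byte_map_base = _byte_map - (0x20000000 >> 9) = _byte_map - 0x100000
//
// so that for any heap address p, byte_for(p) == byte_map_base + (p >> 9)
// lands inside _byte_map.  byte_map_base itself may point outside the
// allocation; only the biased sum is ever dereferenced.  This biasing is
// what lets the generated store-check code be just a shift and a byte
// store, with no subtraction of low_bound.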

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
    _last_LNC_resizing_collection = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}
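
// Illustrative example: if _committed[self] covers the card-table address
// range [0x1000, 0x3000) and a neighboring region's committed range is
// [0x2000, 0x4000), then committed_unique_to_self(self, [0x1000, 0x3000))
// yields [0x1000, 0x2000): the tail [0x2000, 0x3000) is shared with the
// neighbor and must not be uncommitted on self's behalf.  The guard page
// is excluded for the same reason.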

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with one region.
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region that was actually committed.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_covered[ind].start()),
                  ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_committed[ind].start()),
                  ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  p2i(byte_for(_covered[ind].start())),
                  p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  p2i(addr_for((jbyte*) _committed[ind].start())),
                  p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}
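
// Sizing note, assuming the default 512-byte cards and a 4K page size:
// one card-table page describes 4096 cards * 512 bytes = 2M of heap, so
// growing a covered region by 2M of heap commits roughly one additional
// card-table page; the committed end always moves in whole page-size
// steps.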

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}
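
// Illustrative contrast between the two barrier flavors mentioned above:
// for a store to a field at address p inside object obj,
//
//   precise marking:   dirty byte_for(p)          -- the card of the field
//   imprecise marking: dirty byte_for(obj_start)  -- the card of the header
//
// If obj spans cards C3..C5 and the field lives on C5, an imprecise
// barrier dirties only C3, so scanning code must be prepared to walk the
// whole object from its header rather than just the dirty card's range.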

void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call.  The number of active GC
    // threads needs to be passed to non_clean_card_iterate_parallel_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers().  n_par_threads can be set to 0 to
    // turn off parallelism.  For example when this code is called as
    // part of verification and SharedHeap::process_roots() is being
    // used, then n_par_threads() may have been set to 0.  active_workers
    // is not overloaded to double as a switch that disables parallelism;
    // it always means the number of active GC workers.  If parallelism
    // has not been shut off by setting n_par_threads to 0, then
    // n_par_threads should be equal to active_workers.  When a different
    // mechanism for shutting off parallelism is used, then active_workers
    // can be used in place of n_par_threads.
    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel.  The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
         (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  To avoid
          // processing objects beyond the end of the region, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}
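
// Illustrative trace of non_clean_card_iterate_serial(): for covered
// cards C0..C4 holding [clean, dirty, dirty, clean, dirty], the scan runs
// from high to low addresses and hands the closure two maximal non-clean
// runs, in decreasing address order: first {C4}, then {C1, C2} as one
// MemRegion, each intersected with mri so nothing beyond the requested
// region is processed.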

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}
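
// Illustrative example of clear_MemRegion()'s conservatism: if mr.start()
// falls in the middle of a card, byte_after(mr.start() - 1) skips forward
// to the first card that begins at or after mr.start(), so a card only
// partially covered at the front of mr keeps its value.  dirty_MemRegion()
// above has no such concern: redundantly dirtying a boundary card is
// always safe, while cleaning one could lose a recorded pointer update.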

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}
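
// Arithmetic note, assuming the default 512-byte cards and a 4K page:
// ct_max_alignment_constraint() == 512 * 4096 == 2M.  Aligning heap
// boundaries this way makes each aligned slice of heap correspond to a
// whole number of card-table pages, so the commit and uncommit done in
// resize_covered_region() can work in page-size units.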
"" : "not ", val); 660 failures = true; 661 } 662 tty->print_cr("== card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], " 663 "val: %d", p2i(curr), p2i(addr_for(curr)), 664 p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)), 665 (int) curr_val); 666 } 667 } 668 guarantee(!failures, "there should not have been any failures"); 669 } 670 671 void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) { 672 verify_region(mr, dirty_card, false /* val_equals */); 673 } 674 675 void CardTableModRefBS::verify_dirty_region(MemRegion mr) { 676 verify_region(mr, dirty_card, true /* val_equals */); 677 } 678 #endif 679 680 void CardTableModRefBS::print_on(outputStream* st) const { 681 st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT, 682 p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base)); 683 } 684 685 bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) { 686 return 687 CardTableModRefBS::card_will_be_scanned(cv) || 688 _rs->is_prev_nonclean_card_val(cv); 689 }; 690 691 bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) { 692 return 693 cv != clean_card && 694 (CardTableModRefBS::card_may_have_been_dirty(cv) || 695 CardTableRS::youngergen_may_have_been_dirty(cv)); 696 };