/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
      }
    }
  }

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _young_gen = heap->young_gen();
    _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
    // No point in asserting barrier set type here. Need to make
    // CardTableExtension a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mismatch of precise marks and beginning-of-object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
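  // (That is, an imprecise mark, dirty or newgen, on the card covering the
  // object's header is accepted in place of missing precise marks on its
  // interior oops; see the guarantee below.)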
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate_no_header(&object_check);
    if (object_check.has_unmarked_oop()) {
      guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.

// Do not call this method if the space is empty.
// It is a waste to start tasks and get here only to
// do no work. If this method needs to be called
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number,
                                                    uint stripe_total) {
  int ssize = 128; // Naked constant! Work unit = 64k.
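  // (Assuming the usual 512-byte cards, 128 cards cover 128 * 512 = 64 KiB
  // of heap per stripe, which is the "64k" work unit noted above.)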
  int dirty_card_count = 0;

  // It is a waste to get here if empty.
  assert(sp->bottom() < sp->top(), "Should not be called if empty");
  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  // The width of the stripe ssize*stripe_total must be
  // consistent with the number of stripes so that the complete slice
  // is covered.
  size_t slice_width = ssize * stripe_total;
  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*)sp_top, addr_for(worker_end_card));

#ifdef ASSERT
    if (GCWorkerDelayMillis > 0) {
      // Delay 1 worker so that it proceeds after all the work
      // has been completed.
      if (stripe_number < 2) {
        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
      }
    }
#endif

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of
      // last_object's covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");
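    // Scan loop: find each maximal run of unclean cards, widen the run so
    // that no object straddles its end, clear the cards that can safely be
    // cleared, then push the contents of the objects starting in the run.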
    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card"). Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // the newgen cards set by GC may be a different set from the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // we know which cards to scan, now clear them
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

        const int interval = PrefetchScanIntervalInBytes;
        // scan all objects in the range
        if (interval != 0) {
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
            pm->push_contents(m);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
            pm->push_contents(m);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card", or
      // current_card is >= worker_end_card and the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
             "current_card should only be incremented if it still equals "
             "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated. If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->object_iterate(&check);
}

// This should be called immediately after a scavenge, before mutators resume.
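// Verification is two-phase: CheckForPreciseMarks asserts that every
// old-to-young oop lies on a precisely marked card and re-marks that card
// as verify_card; verify_all_young_refs_precise_helper then requires every
// card to be clean or verify_card and restores verify_card entries to
// youngergen_card. (In this codebase PSScavenge drives these verify entry
// points under -XX:+VerifyRememberedSets.)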
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  CheckForPreciseMarks check(
    heap->young_gen(),
    barrier_set_cast<CardTableExtension>(heap->barrier_set()));

  old_gen->oop_iterate_no_header(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table =
    barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Assumes that only the base or the end changes. This allows identification
// of the region that is being resized.
// CardTableModRefBS::resize_covered_region() is used for the normal case
// where the covered regions are growing or shrinking at the high end.
// The method resize_covered_region_by_end() is analogous to
// CardTableModRefBS::resize_covered_region() but
// for regions that grow or shrink at the low end.
void CardTableExtension::resize_covered_region(MemRegion new_region) {

  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region. The region is growing or shrinking
      // from the start of the region.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
             "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
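  // (resize_covered_region_by_start handles this case as well:
  // CardTableModRefBS::resize_covered_region() allocates a fresh covered
  // region when no existing region shares the new region's base.)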
  assert(_cur_covered_regions < _max_covered_regions,
         "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  if (TraceCardTableModRefBS) {
    int ind = changed_region;
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_covered[ind].start()),
                  ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_committed[ind].start()),
                  ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  p2i(byte_for(_covered[ind].start())),
                  p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  p2i(addr_for((jbyte*) _committed[ind].start())),
                  p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  debug_only(verify_guard();)
}

bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set start of "cur_committed" to
    // the new start (min_prev_start), but the assertion checking code
    // below uses cur_committed.end() so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  assert(cur_committed.start() ==
         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                   os::vm_page_size()),
         "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table. That still makes sense when looking for writes
  // off the end of the card table.
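  // Three cases follow: the committed region grows toward lower addresses
  // (commit the new pages), shrinks (uncommitting is currently disabled;
  // see the #if 0 block below), or is unchanged (neither branch is taken).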
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                                  |+ cur committed +|
    //                            |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                          |+ guard +|
    //                                  |+ cur committed +|
    //                  |+ new committed +++++++++++++++++|
    // Case D
    //                                          |+ guard +|
    //                        |+ cur committed +++++++++++|
    //                            |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if (new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), !ExecMem,
                                "card table expansion");
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions. One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it. Let the
        // committed table resizing go even though the committed
        // table will overstate the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
         "end should not change");
  return result;
}

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {

  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                               os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
                                      _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}
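// When the region has grown toward lower addresses, the newly covered cards
// (from the new start, clamped to _whole_heap, up to the original covered
// start) are initialized to clean_card; on a shrink the loop below does
// not execute.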
void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                          MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries. Only consider the
  // region covered by the card table (_whole_heap)
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // Reorder regions. There should be at most one out
  // of order.
  for (int i = _cur_covered_regions - 1; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions-1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
           "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
           "Committed regions out of order");
  }
#endif
}

// Returns the start of any committed region that is lower than
// the target committed region (index ind) and that intersects the
// target region. If none, return start of target region.
//
//      -------------
//      |           |
//      -------------
//              ------------
//              | target   |
//              ------------
//                      -------------
//                      |           |
//                      -------------
//      ^ returns this
//
//      -------------
//      |           |
//      -------------
//                      ------------
//                      | target   |
//                      ------------
//                                      -------------
//                                      |           |
//                                      -------------
//                      ^ returns this

HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}