/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

// Checks an individual oop for missing precise marks. The mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;
  jbyte*              _unmarked_card;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
        _unmarked_card = _card_table->byte_for(p);
      }
    }
  }

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table),
    _unmarked_addr(NULL), _unmarked_card(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    _young_gen  = heap->young_gen();
    _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
    // No point in asserting barrier set type here. Need to make
    // CardTableExtension a unique barrier set type.
  }
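
  // Hedged example of what "imprecise" tolerates (illustrative): an object
  // spanning cards A..C with an old->young field on card C may carry its
  // mark on the header card A only. CheckForUnmarkedOops flags the field
  // on C, and do_object below then accepts the object because the header
  // card A is marked.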

  // Card marks are not precise. The current system can leave us with
  // a mismatch of precise marks and beginning-of-object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate_no_header(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.

// Do not call this method if the space is empty.
// It is a waste to start tasks and get here only to
// do no work. If this method needs to be called
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number,
                                                    uint stripe_total) {
  int ssize = 128; // Naked constant! Work unit = 64k.
  int dirty_card_count = 0;

  // It is a waste to get here if empty.
  assert(sp->bottom() < sp->top(), "Should not be called if empty");
  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  // The width of a slice, ssize * stripe_total, must be consistent with
  // the number of stripes so that stepping by slice_width covers the
  // complete space.
  size_t slice_width = ssize * stripe_total;
  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! Ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*)sp_top, addr_for(worker_end_card));
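
    // Worked example of the striping (illustrative numbers): with the
    // 512-byte cards used by this card table, ssize = 128 cards covers
    // 64k of heap. With stripe_total = 4 workers, slice_width = 512
    // cards, so stripe 2 claims cards [256, 384) of every slice; the
    // claim is then extended below when objects straddle its boundaries.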

#ifdef ASSERT
    if (GCWorkerDelayMillis > 0) {
      // Delay the first workers so that they proceed after all the work
      // has been completed.
      if (stripe_number < 2) {
        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
      }
    }
#endif

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
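          // Illustrative layout of the case handled below (D = dirty,
          // C = clean): a large object whose cards read D C D ends past
          // the clean card we just stopped on:
          //     cards:   D C D C
          //     object:  [.......]
          // Bumping current_card to the object's ending card folds the
          // whole object into one unclean run, so it is pushed once.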
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump current_card to ending_card_of_last_object.
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card"). Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // We know which cards to scan, now clear them.
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

        const int interval = PrefetchScanIntervalInBytes;
        // Scan all objects in the range.
        if (interval != 0) {
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
            pm->push_contents(m);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
            pm->push_contents(m);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card", or
      // current_card is >= worker_end_card, so the loop will not
      // execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
             "current_card should only be incremented if it still equals "
             "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated. If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  old_gen->object_iterate(&check);
}
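
// Hedged sketch of how the two verification passes pair up around a
// scavenge (the actual wiring lives in the scavenge code, guarded by a
// verification flag; this sequence is illustrative only):
//
//   card_table->verify_all_young_refs_imprecise(); // before the scavenge
//   // ... scavenge runs, updating old->young references ...
//   card_table->verify_all_young_refs_precise();   // after, before mutators resume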

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  CheckForPreciseMarks check(
    heap->young_gen(),
    barrier_set_cast<CardTableExtension>(heap->barrier_set()));

  old_gen->oop_iterate_no_header(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table =
    barrier_set_cast<CardTableExtension>(Universe::heap()->barrier_set());

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}
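
// Summary of the two queries above, derived from their branches:
//   clean  -> neither imprecise nor precise
//   dirty  -> imprecise only (mutator write; exact field unknown)
//   newgen -> both (an exact old->young location recorded by GC)
//   verify -> precise only (temporary mark used during verification)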

// Assumes that only the base or the end changes. This allows identification
// of the region that is being resized.
// CardTableModRefBS::resize_covered_region() is used for the normal case
// where the covered regions are growing or shrinking at the high end.
// The method resize_covered_region_by_end() is analogous to
// CardTableModRefBS::resize_covered_region() but
// for regions that grow or shrink at the low end.
void CardTableExtension::resize_covered_region(MemRegion new_region) {

  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region. The region is growing or shrinking
      // from the start of the region.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
             "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
  assert(_cur_covered_regions < _max_covered_regions,
         "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  if (TraceCardTableModRefBS) {
    int ind = changed_region;
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_covered[ind].start()),
                  ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_committed[ind].start()),
                  ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  p2i(byte_for(_covered[ind].start())),
                  p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  p2i(addr_for((jbyte*) _committed[ind].start())),
                  p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  debug_only(verify_guard();)
}

bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
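  // Illustrative overlap (addresses increase to the right): when a lower
  // committed region intersects this one,
  //     _committed[j]:        [==========)
  //     _committed[ind]:             [==========)
  //     extended:             [=================)
  // the start is pulled down to the lower neighbor's start so that the
  // alignment assertion and commit arithmetic below see a consistent
  // region.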
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set the start of "cur_committed" to
    // the new start (min_prev_start), but the assertion checking code
    // below uses cur_committed.end(), so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(cur_committed.start() ==
         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                   os::vm_page_size()),
         "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table. That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                    |+ guard +|
    //                  |+ cur committed +++++++++++|
    //          |+ new committed +++++++++++++++++++|
    //
    // Case B
    //                                    |+ guard +|
    //        |+ cur committed +|
    //          |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                    |+ guard +|
    //        |+ cur committed +|
    //          |+ new committed +++++++++++++++++++|
    // Case D
    //                                    |+ guard +|
    //        |+ cur committed +++++++++++|
    //          |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if (new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), !ExecMem,
                                "card table expansion");
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions. One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it. Let the
        // committed table resizing go even though the committed
        // table will overstate the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
         "end should not change");
  return result;
}

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {

  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                               os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
                                      _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                          MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries. Only consider the
  // region covered by the card table (_whole_heap)
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // Reorder regions. At most one should be out of order.
  for (int i = _cur_covered_regions - 1; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions-1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
           "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
           "Committed regions out of order");
  }
#endif
}

// Returns the start of any committed region that is lower than
// the target committed region (index ind) and that intersects the
// target region. If none, return the start of the target region.
//
//      -------------
//      |           |
//      -------------
//                      ------------
//                      |  target  |
//                      ------------
//                                     -------------
//                                     |           |
//                                     -------------
//                      ^ returns this
//
//      -------------
//      |           |
//      -------------
//                ------------
//                |  target  |
//                ------------
//                                     -------------
//                                     |           |
//                                     -------------
//      ^ returns this

HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}