/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/prefetch.inline.hpp"

// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;
  jbyte*              _unmarked_card;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
        _unmarked_card = _card_table->byte_for(p);
      }
    }
  }

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    _young_gen = heap->young_gen();
    _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
    // No point in asserting barrier set type here. Need to make
    // CardTableExtension a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mismatch of precise marks and beginning of object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate_no_header(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};
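
// Taken together, these two closures implement the imprecise check used by
// verify_all_young_refs_imprecise() below: CheckForUnmarkedObjects applies
// CheckForUnmarkedOops to each object's fields, so a young-gen reference is
// only acceptable if its own card carries some mark or the card covering the
// object's header does.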

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.

// Do not call this method if the space is empty.
// It is a waste to start tasks and get here only to
// do no work. If this method needs to be called
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number,
                                                    uint stripe_total) {
  int ssize = 128; // Naked constant! Work unit = 64k.
  int dirty_card_count = 0;

  // It is a waste to get here if empty.
  assert(sp->bottom() < sp->top(), "Should not be called if empty");
  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  // The width of a slice, ssize * stripe_total, must be consistent with
  // the number of stripes so that the complete space is covered.
  size_t slice_width = ssize * stripe_total;
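  // For example, with the usual 512-byte cards a stripe of 128 cards covers
  // the 64k noted above; with a stripe_total of 4, each slice spans 512
  // cards (256k) and the worker with stripe_number 2 always takes cards
  // [256, 384) of its slice.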
  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*)sp_top, addr_for(worker_end_card));

#ifdef ASSERT
    if (GCWorkerDelayMillis > 0) {
      // Delay 1 worker so that it proceeds after all the work
      // has been completed.
      if (stripe_number < 2) {
        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
      }
    }
#endif

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card"). Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // the newgen cards set by GC may be a different set than the
          // originally dirty set.
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

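        // The clamping below never cleans the chunk's first card
        // (worker_start_card) or its last card (worker_end_card - 1).
        // An object straddling a stripe boundary is covered by those
        // cards, which is presumably why it is not safe for this worker
        // to clean them.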
        // we know which cards to scan, now clear them
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

        const int interval = PrefetchScanIntervalInBytes;
        // scan all objects in the range
        if (interval != 0) {
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card" or
      // the current_card is >= the worker_end_card so the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
             "current_card should only be incremented if it still equals "
             "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated. If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  old_gen->object_iterate(&check);
}

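// Precise verification works in two passes: CheckForPreciseMarks asserts
// that every old-to-young reference sits on a precisely marked card and
// retags that card with verify_card (via set_card_newgen()), and
// verify_all_young_refs_precise_helper() then checks that no other card
// marks remain, restoring each verify_card to youngergen_card.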
// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  CheckForPreciseMarks check(
    heap->young_gen(),
    barrier_set_cast<CardTableExtension>(heap->barrier_set()));

  old_gen->oop_iterate_no_header(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table =
    barrier_set_cast<CardTableExtension>(Universe::heap()->barrier_set());

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Assumes that only the base or the end changes. This allows identification
// of the region that is being resized. The
// CardTableModRefBS::resize_covered_region() is used for the normal case
// where the covered regions are growing or shrinking at the high end.
// The method resize_covered_region_by_end() is analogous to
// CardTableModRefBS::resize_covered_region() but
// for regions that grow or shrink at the low end.
void CardTableExtension::resize_covered_region(MemRegion new_region) {

  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region. The region is growing or shrinking
      // from the start of the region.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
             "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
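  // That case is safe to hand to resize_covered_region_by_start(), which
  // simply delegates to CardTableModRefBS::resize_covered_region(); the
  // superclass also handles adding a brand-new covered region.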
  assert(_cur_covered_regions < _max_covered_regions,
         "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  if (TraceCardTableModRefBS) {
    int ind = changed_region;
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                           "  _covered[%d].start(): " INTPTR_FORMAT
                           "  _covered[%d].last(): " INTPTR_FORMAT,
                           ind, p2i(_covered[ind].start()),
                           ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                           "  _committed[%d].start(): " INTPTR_FORMAT
                           "  _committed[%d].last(): " INTPTR_FORMAT,
                           ind, p2i(_committed[ind].start()),
                           ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr("  "
                           "  byte_for(start): " INTPTR_FORMAT
                           "  byte_for(last): " INTPTR_FORMAT,
                           p2i(byte_for(_covered[ind].start())),
                           p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr("  "
                           "  addr_for(start): " INTPTR_FORMAT
                           "  addr_for(last): " INTPTR_FORMAT,
                           p2i(addr_for((jbyte*) _committed[ind].start())),
                           p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  debug_only(verify_guard();)
}

bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set start of "cur_committed" to
    // the new start (min_prev_start) but the assertion checking code
    // below uses cur_committed.end() so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(cur_committed.start() ==
         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                   os::vm_page_size()),
         "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table. That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                                |+ cur committed +|
    //                  |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                          |+ guard +|
    //                                          |+ cur committed +|
    //                  |+ new committed +++++++++++++++++|
    // Case D
    //                                          |+ guard +|
    //                            |+ cur committed +++++++++++|
    //                  |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if (new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), !ExecMem,
                                "card table expansion");
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions. One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it. Let the
        // committed table resizing go even though the committed
        // table will overstate the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
         "end should not change");
  return result;
}

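// The three resize_update_* helpers below work from the same view of the
// change: the committed table gets the new page-aligned start, card entries
// that have newly come under the covered region are reset to clean_card,
// and the covered table is updated and kept sorted by start address.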
void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {

  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                               os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
                                      _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                          MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries. Only consider the
  // region covered by the card table (_whole_heap)
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // Reorder the regions. At most one should be out of order.
  for (int i = _cur_covered_regions - 1; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions-1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
           "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
           "Committed regions out of order");
  }
#endif
}

// Returns the start of any committed region that is lower than
// the target committed region (index ind) and that intersects the
// target region. If none, return start of target region.
//
//      -------------
//      |           |
//      -------------
//              ------------
//              |  target  |
//              ------------
//                               -------------
//                               |           |
//                               -------------
//      ^ returns this
//
//      -------------
//      |           |
//      -------------
//                  ------------
//                  |  target  |
//                  ------------
//                               -------------
//                               |           |
//                               -------------
//      ^ returns this

HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}