/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"

// Checks an individual oop for missing precise marks. The mark
// may be either dirty or newgen.
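// Note: "dirty" cards are those marked by the mutator write barrier, while
// "newgen" (youngergen) cards are those marked by the GC itself when it finds
// an old-to-young pointer; addr_is_marked_imprecise() below accepts either.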
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;
  jbyte*              _unmarked_card;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
        _unmarked_card = _card_table->byte_for(p);
      }
    }
  }

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    _young_gen = heap->young_gen();
    _card_table = (CardTableExtension*)heap->barrier_set();
    // No point in asserting barrier set type here. Need to make CardTableExtension
    // a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mishmash of precise marks and beginning-of-object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
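  // Illustrative example: for an object whose header lies on card C0 and
  // whose fields extend onto cards C1..C2, a precise mark dirties the card
  // of the field actually written (say C2), while an imprecise mark dirties
  // only C0, the card of the object head.  Either satisfies this check.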
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate_no_header(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
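      // The mark written here is the temporary verify_card value; see
      // verify_all_young_refs_precise_helper(), which converts it back
      // to youngergen_card once verification of the region completes.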
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.

// Do not call this method if the space is empty.
// It is a waste to start tasks and get here only to
// do no work.  If this method needs to be called
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number,
                                                    uint stripe_total) {
  int ssize = 128; // Naked constant!  Work unit = 64k.
  int dirty_card_count = 0;

  // It is a waste to get here if empty.
  assert(sp->bottom() < sp->top(), "Should not be called if empty");
  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card   = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  // The slice width, ssize * stripe_total, must be consistent
  // with the number of stripes so that the complete slice
  // is covered.
  size_t slice_width = ssize * stripe_total;
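  // Worked example (illustrative; assumes the usual 512-byte card size):
  // with ssize = 128 cards per stripe, each stripe covers 128 * 512 = 64K
  // of heap.  With stripe_total = 4, slice_width = 512 cards, so the worker
  // with stripe_number = 1 starts at cards 128, 640, 1152, ... past
  // start_card.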
  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));

#ifdef ASSERT
    if (GCWorkerDelayMillis > 0) {
      // Delay 1 worker so that it proceeds after all the work
      // has been completed.
      if (stripe_number < 2) {
        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
      }
    }
#endif

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
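          // Illustrative example: an object covering cards C..C+3 with C and
          // C+2 dirty but C+1 clean.  The inner scan stops at C+1; bumping
          // current_card to the object's ending card keeps the object's dirty
          // cards in one run instead of treating C+2 as a fresh object start.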
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card").  Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // we know which cards to scan, now clear them
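        // Note that the first and last cards of the worker's range are
        // deliberately left unclean by the clamping below: leaving a card
        // dirty is always safe, whereas cleaning a boundary card that an
        // adjacent stripe's worker may still need would not be.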
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

        const int interval = PrefetchScanIntervalInBytes;
        // scan all objects in the range
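        // The scan loop is duplicated so that the prefetch-interval test is
        // evaluated once here rather than per object inside the hot loop.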
        if (interval != 0) {
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
      // "current_card" is still "following_clean_card", or
      // current_card is >= worker_end_card and the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
        "current_card should only be incremented if it still equals "
        "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated.  If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  old_gen->object_iterate(&check);
}

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  CheckForPreciseMarks check(heap->young_gen(), (CardTableExtension*)heap->barrier_set());

  old_gen->oop_iterate_no_header(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}

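// Walks the cards covering mr.  After CheckForPreciseMarks has run, every
// card should be either clean or carry the temporary verify_card value;
// verify_card entries are restored to youngergen_card here.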
void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table = (CardTableExtension*)Universe::heap()->barrier_set();
  // FIX ME ASSERT HERE

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Assumes that only the base or the end changes.  This allows identification
// of the region that is being resized.
// CardTableModRefBS::resize_covered_region() is used for the normal case,
// where the covered regions grow or shrink at the high end.
// The method resize_covered_region_by_end() is analogous, but is used
// for regions that grow or shrink at the low end.
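// Dispatch summary: a matching start means the region is changing at its
// high end (the base class handles it); a matching end means it is changing
// at its low end; if neither matches, this is a brand new covered region.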
void CardTableExtension::resize_covered_region(MemRegion new_region) {

  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region.  The region is growing or shrinking
      // from the start of the region.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
        "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
  assert(_cur_covered_regions < _max_covered_regions,
    "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
    "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  if (TraceCardTableModRefBS) {
    int ind = changed_region;
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

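// Commits pages when the committed region needs to grow at the low end
// (uncommitting on shrink is currently disabled below).  Returns true if
// the caller still needs to update the committed table via
// resize_update_committed_table().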
bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
    "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set the start of "cur_committed" to
    // the new start (min_prev_start), but the assertion checking code
    // below uses cur_committed.end(), so make it correct.
    MemRegion new_committed =
        MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(cur_committed.start() ==
    (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                              os::vm_page_size()),
    "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table.  That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++++++++++++|
    // Case D
    //                                          |+ guard +|
    //                        |+ cur committed +++++++++++|
    //                  |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if (new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size())) {
        vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
                              "card table expansion");
      }
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions.  One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it.  Let the
        // committed table resizing go through even though the committed
        // table will overstate the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
    "end should not change");
  return result;
}

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {

  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                               os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
    _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                          MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries.  Only consider the
  // region covered by the card table (_whole_heap)
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // Reorder regions.  There should only be at most 1 out
  // of order.
  for (int i = _cur_covered_regions - 1; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions-1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
      "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
      "Committed regions out of order");
  }
#endif
}

// Returns the start of any committed region that is lower than
// the target committed region (index ind) and that intersects the
// target region.  If none, return start of target region.
//
//      -------------
//      |           |
//      -------------
//              ------------
//              | target   |
//              ------------
//                               -------------
//                               |           |
//                               -------------
//      ^ returns this
//
//      -------------
//      |           |
//      -------------
//                      ------------
//                      | target   |
//                      ------------
//                               -------------
//                               |           |
//                               -------------
//                      ^ returns this

HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}