
src/share/vm/memory/cardTableModRefBS.cpp

rev 7399 : 8059066: CardTableModRefBS might commit the same page twice
rev 7400 : 8059066: CardTableModRefBS might commit the same page twice (01)
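Context for the fix: each covered region's committed card-table range has its end aligned up to a page boundary. When that aligned-up end lands on or past the start of the next region's already-committed pages, the commit call can be handed pages that are already committed. Below is a minimal standalone sketch of the align-up arithmetic involved; the addresses, page size, and helper are made up for illustration and this is not the HotSpot code itself.

    #include <cstdint>
    #include <cstdio>

    // Stand-in for HotSpot's align_size_up(): round 'size' up to the
    // next multiple of 'alignment' (a power of two).
    static uintptr_t align_size_up(uintptr_t size, uintptr_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const uintptr_t page = 0x1000;  // assumed 4 KiB pages
      // Hypothetical layout: the next region's committed pages span
      // exactly one page, [0x6000, 0x7000), and region "ind" now needs
      // card bytes up to 0x6010.
      uintptr_t new_end         = 0x6010;
      uintptr_t new_end_aligned = align_size_up(new_end, page);  // 0x7000
      // new_end_aligned equals the *end* of the neighbouring committed
      // region, so committing up to it would re-commit [0x6000, 0x7000).
      printf("aligned end = %#lx\n", (unsigned long)new_end_aligned);
      return 0;
    }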


 258   if (new_region.word_size() != old_region.word_size()) {
 259     // Commit new or uncommit old pages, if necessary.
 260     MemRegion cur_committed = _committed[ind];
 261     // Extend the end of this _committed region
 262     // to cover the end of any lower _committed regions.
 263     // This forms overlapping regions, but never interior regions.
 264     HeapWord* const max_prev_end = largest_prev_committed_end(ind);
 265     if (max_prev_end > cur_committed.end()) {
 266       cur_committed.set_end(max_prev_end);
 267     }
 268     // Align the end up to a page size (starts are already aligned).
 269     jbyte* const new_end = byte_after(new_region.last());
 270     HeapWord* new_end_aligned =
 271       (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
 272     assert(new_end_aligned >= (HeapWord*) new_end,
 273            "align up, but less");
 274     // Check the other regions (excludes "ind") to ensure that
 275     // the new_end_aligned does not intrude onto the committed
 276     // space of another region.
 277     int ri = 0;
 278     for (ri = 0; ri < _cur_covered_regions; ri++) {
 279       if (ri != ind) {
 280         if (_committed[ri].contains(new_end_aligned)) {
 281           // The prior check included in the assert
 282           // (new_end_aligned >= _committed[ri].start())
 283           // is redundant with the "contains" test.
 284           // Any region containing the new end
 285           // should start at or beyond the region found (ind)
 286           // for the new end (committed regions are not expected to
 287           // be proper subsets of other committed regions).
 288           assert(_committed[ri].start() >= _committed[ind].start(),
 289                  "New end of committed region is inconsistent");
 290           new_end_aligned = _committed[ri].start();
 291           // new_end_aligned can be equal to the start of its
 292           // committed region (i.e., of "ind") if a second
 293           // region following "ind" also starts at the same location
 294           // as "ind".
 295           assert(new_end_aligned >= _committed[ind].start(),
 296             "New end of committed region is before start");
 297           debug_only(collided = true;)
 298           // Should only collide with 1 region
 299           break;
 300         }
 301       }
 302     }
 303 #ifdef ASSERT
 304     for (++ri; ri < _cur_covered_regions; ri++) {
 305       assert(!_committed[ri].contains(new_end_aligned),
 306         "New end of committed region is in a second committed region");
 307     }
 308 #endif
 309     // The guard page is always committed and should not be committed over.
 310     // "guarded" is used for assertion checking below and recalls the fact
 311     // that the would-be end of the new committed region would have
 312     // penetrated the guard page.
 313     HeapWord* new_end_for_commit = new_end_aligned;
 314 
 315     DEBUG_ONLY(bool guarded = false;)
 316     if (new_end_for_commit > _guard_region.start()) {
 317       new_end_for_commit = _guard_region.start();
 318       DEBUG_ONLY(guarded = true;)
 319     }
 320 
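In this pre-fix version above, the collision scan relies on MemRegion::contains(), a half-open [start, end) membership test. That flags the harmless touching case (new_end_aligned == start, where nothing overlaps) but misses new_end_aligned == end, where the range to be committed covers the neighbouring region's pages entirely and they get committed a second time. A standalone sketch contrasting the two predicates; the Region type and addresses are invented for illustration:

    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    // Toy model of a committed region [start, end), in byte addresses.
    struct Region { uintptr_t start, end; };

    // Pre-fix test: half-open membership, as MemRegion::contains() does.
    static bool old_collides(const Region& r, uintptr_t p) {
      return p >= r.start && p < r.end;
    }

    // Post-fix test: does committing up to 'p' overlap r at all?
    // (The real code additionally asserts p <= r.end.)
    static bool new_collides(const Region& r, uintptr_t p) {
      return p > r.start;
    }

    int main() {
      const Region next = {0x6000, 0x7000};  // one already-committed page
      for (uintptr_t p : {0x6000ul, 0x6800ul, 0x7000ul}) {
        printf("end %#lx: old=%d new=%d\n",
               (unsigned long)p, old_collides(next, p), new_collides(next, p));
      }
      // end 0x7000 prints old=0 new=1: the double-commit case that the
      // old contains() test missed.
      return 0;
    }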




 258   if (new_region.word_size() != old_region.word_size()) {
 259     // Commit new or uncommit old pages, if necessary.
 260     MemRegion cur_committed = _committed[ind];
 261     // Extend the end of this _committed region
 262     // to cover the end of any lower _committed regions.
 263     // This forms overlapping regions, but never interior regions.
 264     HeapWord* const max_prev_end = largest_prev_committed_end(ind);
 265     if (max_prev_end > cur_committed.end()) {
 266       cur_committed.set_end(max_prev_end);
 267     }
 268     // Align the end up to a page size (starts are already aligned).
 269     jbyte* const new_end = byte_after(new_region.last());
 270     HeapWord* new_end_aligned =
 271       (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
 272     assert(new_end_aligned >= (HeapWord*) new_end,
 273            "align up, but less");
 274     // Check the other regions (excludes "ind") to ensure that
 275     // the new_end_aligned does not intrude onto the committed
 276     // space of another region.
 277     int ri = 0;
 278     for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
 279       if (new_end_aligned > _committed[ri].start()) {
 280         assert(new_end_aligned <= _committed[ri].end(),
 281                "An earlier committed region can't cover a later committed region");
 282         // Any region containing the new end
 283         // should start at or beyond the region found (ind)
 284         // for the new end (committed regions are not expected to
 285         // be proper subsets of other committed regions).
 286         assert(_committed[ri].start() >= _committed[ind].start(),
 287                "New end of committed region is inconsistent");
 288         new_end_aligned = _committed[ri].start();
 289         // new_end_aligned can be equal to the start of its
 290         // committed region (i.e., of "ind") if a second
 291         // region following "ind" also starts at the same location
 292         // as "ind".
 293         assert(new_end_aligned >= _committed[ind].start(),
 294           "New end of committed region is before start");
 295         debug_only(collided = true;)
 296         // Should only collide with 1 region
 297         break;
 298       }
 299     }
 300 #ifdef ASSERT
 301     for (++ri; ri < _cur_covered_regions; ri++) {
 302       assert(!_committed[ri].contains(new_end_aligned),
 303         "New end of committed region is in a second committed region");
 304     }
 305 #endif
 306     // The guard page is always committed and should not be committed over.
 307     // "guarded" is used for assertion checking below and recalls the fact
 308     // that the would-be end of the new committed region would have
 309     // penetrated the guard page.
 310     HeapWord* new_end_for_commit = new_end_aligned;
 311 
 312     DEBUG_ONLY(bool guarded = false;)
 313     if (new_end_for_commit > _guard_region.start()) {
 314       new_end_for_commit = _guard_region.start();
 315       DEBUG_ONLY(guarded = true;)
 316     }
 317 
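One detail the patch leaves untouched: whatever end the collision handling settles on is still clamped so the commit never extends into the guard page, which stays permanently committed. A minimal sketch of that clamp, again with made-up addresses and names rather than the real CardTableModRefBS members:

    #include <cstdint>
    #include <cassert>

    // Hypothetical clamp: never let a commit run into the guard page.
    // Returns the (possibly reduced) end; 'guarded' records, for debug
    // checking, that the would-be end penetrated the guard region.
    static uintptr_t clamp_to_guard(uintptr_t new_end_aligned,
                                    uintptr_t guard_start,
                                    bool* guarded) {
      if (new_end_aligned > guard_start) {
        *guarded = true;
        return guard_start;
      }
      return new_end_aligned;
    }

    int main() {
      bool guarded = false;
      // Assumed layout: guard page begins at 0x8000, aligned end 0x9000.
      uintptr_t end = clamp_to_guard(0x9000, 0x8000, &guarded);
      assert(end == 0x8000 && guarded);
      return 0;
    }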

