
src/share/vm/gc/parallel/cardTableExtension.cpp

Old version:

 487                                                 MemRegion new_region) {
 488   bool result = false;
 489   // Commit new or uncommit old pages, if necessary.
 490   MemRegion cur_committed = _committed[changed_region];
 491   assert(_covered[changed_region].end() == new_region.end(),
 492     "The ends of the regions are expected to match");
 493   // Extend the start of this _committed region to
 494   // cover the start of any previous _committed region.
 495   // This forms overlapping regions, but never interior regions.
 496   HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
 497   if (min_prev_start < cur_committed.start()) {
 498     // Only really need to set start of "cur_committed" to
 499     // the new start (min_prev_start) but assertion checking code
 500     // below uses cur_committed.end(), so make it correct.
 501     MemRegion new_committed =
 502         MemRegion(min_prev_start, cur_committed.end());
 503     cur_committed = new_committed;
 504   }
 505 #ifdef ASSERT
 506   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 507   assert(cur_committed.start() == align_ptr_up(cur_committed.start(), os::vm_page_size()),
 508     "Starts should have proper alignment");
 509 #endif
 510 
 511   jbyte* new_start = byte_for(new_region.start());
 512   // Round down because this is for the start address
 513   HeapWord* new_start_aligned =
 514     (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
 515   // The guard page is always committed and should not be committed over.
 516   // This method is used in cases where the generation is growing toward
 517   // lower addresses but the guard region is still at the end of the
 518   // card table.  That still makes sense when looking for writes
 519   // off the end of the card table.
 520   if (new_start_aligned < cur_committed.start()) {
 521     // Expand the committed region
 522     //
 523     // Case A
 524     //                                          |+ guard +|
 525     //                          |+ cur committed +++++++++|
 526     //                  |+ new committed +++++++++++++++++|
 527     //
 528     // Case B
 529     //                                          |+ guard +|
 530     //                        |+ cur committed +|
 531     //                  |+ new committed +++++++|
 532     //
 533     // These are not expected because the calculation of the
 534     // cur committed region and the new committed region


 567         // If the uncommit fails, ignore it.  Let the
 568         // committed table resizing go even though the committed
 569         // table will overstate the committed space.
 570       }
 571     }
 572 #else
 573     assert(!result, "Should be false with current workaround");
 574 #endif
 575   }
 576   assert(_committed[changed_region].end() == cur_committed.end(),
 577     "end should not change");
 578   return result;
 579 }
 580 
 581 void CardTableExtension::resize_update_committed_table(int changed_region,
 582                                                        MemRegion new_region) {
 583 
 584   jbyte* new_start = byte_for(new_region.start());
 585   // Set the new start of the committed region
 586   HeapWord* new_start_aligned =
 587     (HeapWord*)align_ptr_down(new_start, os::vm_page_size());
 588   MemRegion new_committed = MemRegion(new_start_aligned,
 589     _committed[changed_region].end());
 590   _committed[changed_region] = new_committed;
 591   _committed[changed_region].set_start(new_start_aligned);
 592 }
 593 
 594 void CardTableExtension::resize_update_card_table_entries(int changed_region,
 595                                                           MemRegion new_region) {
 596   debug_only(verify_guard();)
 597   MemRegion original_covered = _covered[changed_region];
 598   // Initialize the card entries.  Only consider the
 599   // region covered by the card table (_whole_heap)
 600   jbyte* entry;
 601   if (new_region.start() < _whole_heap.start()) {
 602     entry = byte_for(_whole_heap.start());
 603   } else {
 604     entry = byte_for(new_region.start());
 605   }
 606   jbyte* end = byte_for(original_covered.start());
 607   // If _whole_heap starts at the original covered region's start,


New version:


 487                                                 MemRegion new_region) {
 488   bool result = false;
 489   // Commit new or uncommit old pages, if necessary.
 490   MemRegion cur_committed = _committed[changed_region];
 491   assert(_covered[changed_region].end() == new_region.end(),
 492     "The ends of the regions are expected to match");
 493   // Extend the start of this _committed region to
 494   // cover the start of any previous _committed region.
 495   // This forms overlapping regions, but never interior regions.
 496   HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
 497   if (min_prev_start < cur_committed.start()) {
 498     // Only really need to set start of "cur_committed" to
 499     // the new start (min_prev_start) but assertion checking code
 500     // below uses cur_committed.end(), so make it correct.
 501     MemRegion new_committed =
 502         MemRegion(min_prev_start, cur_committed.end());
 503     cur_committed = new_committed;
 504   }
 505 #ifdef ASSERT
 506   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 507   assert(cur_committed.start() == align_up(cur_committed.start(), os::vm_page_size()),
 508     "Starts should have proper alignment");
 509 #endif
 510 
 511   jbyte* new_start = byte_for(new_region.start());
 512   // Round down because this is for the start address
 513   HeapWord* new_start_aligned =
 514     (HeapWord*)align_down((uintptr_t)new_start, os::vm_page_size());
 515   // The guard page is always committed and should not be committed over.
 516   // This method is used in cases where the generation is growing toward
 517   // lower addresses but the guard region is still at the end of the
 518   // card table.  That still makes sense when looking for writes
 519   // off the end of the card table.
 520   if (new_start_aligned < cur_committed.start()) {
 521     // Expand the committed region
 522     //
 523     // Case A
 524     //                                          |+ guard +|
 525     //                          |+ cur committed +++++++++|
 526     //                  |+ new committed +++++++++++++++++|
 527     //
 528     // Case B
 529     //                                          |+ guard +|
 530     //                        |+ cur committed +|
 531     //                  |+ new committed +++++++|
 532     //
 533     // These are not expected because the calculation of the
 534     // cur committed region and the new committed region


 567         // If the uncommit fails, ignore it.  Let the
 568         // committed table resizing go even though the committed
 569         // table will overstate the committed space.
 570       }
 571     }
 572 #else
 573     assert(!result, "Should be false with current workaround");
 574 #endif
 575   }
 576   assert(_committed[changed_region].end() == cur_committed.end(),
 577     "end should not change");
 578   return result;
 579 }
 580 
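The change visible in this excerpt is the switch from the pointer- and size-specific helpers (align_ptr_up, align_size_down, align_ptr_down) to the unified align_up/align_down; in both versions the goal is the same, to land the committed region's start on an os::vm_page_size() boundary. A minimal standalone sketch of that rounding, using hypothetical helper names rather than HotSpot's templates and assuming the page size is a power of two:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical helpers illustrating the rounding done by the align_down /
// align_up calls above; both assume a power-of-two alignment.
static inline uintptr_t round_down_to(uintptr_t value, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return value & ~(alignment - 1);
}

static inline uintptr_t round_up_to(uintptr_t value, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const uintptr_t page_size = 4096;            // stand-in for os::vm_page_size()
  const uintptr_t card_byte_addr = 0x7f30a1c3; // stand-in for (uintptr_t)byte_for(new_region.start())

  // Round the prospective start down onto a page boundary, as the code above
  // does before deciding whether the committed region must be expanded.
  uintptr_t new_start_aligned = round_down_to(card_byte_addr, page_size);
  printf("aligned start: 0x%lx\n", (unsigned long)new_start_aligned);

  // A page-aligned address is unchanged by rounding up, which is what the
  // ASSERT block checks for cur_committed.start().
  assert(round_up_to(new_start_aligned, page_size) == new_start_aligned);
  return 0;
}

Rounding the start down (never up) matters here because the generation is growing toward lower addresses: a downward rounding can only commit extra bytes in front of the card bytes for new_region, never leave any of them uncommitted.
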
 581 void CardTableExtension::resize_update_committed_table(int changed_region,
 582                                                        MemRegion new_region) {
 583 
 584   jbyte* new_start = byte_for(new_region.start());
 585   // Set the new start of the committed region
 586   HeapWord* new_start_aligned =
 587     (HeapWord*)align_down(new_start, os::vm_page_size());
 588   MemRegion new_committed = MemRegion(new_start_aligned,
 589     _committed[changed_region].end());
 590   _committed[changed_region] = new_committed;
 591   _committed[changed_region].set_start(new_start_aligned);
 592 }
 593 
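resize_update_committed_table only moves the recorded start of the committed region; the end stays fixed, consistent with the "end should not change" assertion at the end of the previous function. A small sketch of that shape, using a simplified stand-in for MemRegion (a hypothetical struct, not HotSpot's class):

#include <cassert>
#include <cstdint>

// Simplified stand-in for HotSpot's MemRegion: a half-open [start, end) range.
struct Region {
  uintptr_t start;
  uintptr_t end;
};

// Mirrors the shape of resize_update_committed_table: compute a page-aligned
// new start and rebuild the region with the old end.
static Region move_start_down(Region committed, uintptr_t new_start, uintptr_t page_size) {
  uintptr_t new_start_aligned = new_start & ~(page_size - 1);  // round down to a page boundary
  return Region{new_start_aligned, committed.end};
}

int main() {
  const uintptr_t page = 4096;
  Region committed{0x20000, 0x40000};

  // The generation grew toward lower addresses, so the card bytes for the
  // resized region start below the currently recorded committed range.
  Region updated = move_start_down(committed, 0x1e010, page);
  assert(updated.start == 0x1e000);      // rounded down to a page boundary
  assert(updated.end == committed.end);  // the end never changes
  return 0;
}
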
 594 void CardTableExtension::resize_update_card_table_entries(int changed_region,
 595                                                           MemRegion new_region) {
 596   debug_only(verify_guard();)
 597   MemRegion original_covered = _covered[changed_region];
 598   // Initialize the card entries.  Only consider the
 599   // region covered by the card table (_whole_heap)
 600   jbyte* entry;
 601   if (new_region.start() < _whole_heap.start()) {
 602     entry = byte_for(_whole_heap.start());
 603   } else {
 604     entry = byte_for(new_region.start());
 605   }
 606   jbyte* end = byte_for(original_covered.start());
 607   // If _whole_heap starts at the original covered region's start,


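The excerpt of resize_update_card_table_entries is cut off mid-comment, but the visible part already shows the key clamp: the first card byte to initialize comes from _whole_heap.start() when the new region begins below the range the card table covers, and from new_region.start() otherwise, with the end bounded by the card byte for the originally covered region's start. A rough sketch of that clamp, using hypothetical card-index arithmetic (512-byte cards are assumed for illustration, not taken from this source):

#include <cassert>
#include <cstdint>

// Hypothetical card geometry for illustration only.
static const uintptr_t card_shift = 9;  // 512-byte cards (assumed, not read from the source)

// Stand-in for byte_for(): map a heap address to an index into the card table.
static uintptr_t card_index_for(uintptr_t heap_addr, uintptr_t covered_start) {
  return (heap_addr - covered_start) >> card_shift;
}

int main() {
  const uintptr_t whole_heap_start = 0x100000;        // start of the covered range (_whole_heap)
  const uintptr_t original_covered_start = 0x140000;  // original_covered.start()

  // Case 1: the resized region starts inside the covered range; begin at its start.
  uintptr_t new_region_start = 0x120000;
  uintptr_t first = new_region_start < whole_heap_start
      ? card_index_for(whole_heap_start, whole_heap_start)
      : card_index_for(new_region_start, whole_heap_start);
  uintptr_t end = card_index_for(original_covered_start, whole_heap_start);
  assert(first <= end);  // cards in [first, end) are the newly covered ones to initialize

  // Case 2: the resized region starts below the covered range; clamp to _whole_heap.
  new_region_start = 0x0f0000;
  first = new_region_start < whole_heap_start
      ? card_index_for(whole_heap_start, whole_heap_start)
      : card_index_for(new_region_start, whole_heap_start);
  assert(first == 0);  // clamped to the first card of the covered range
  return 0;
}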