
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 51611 : 8214118: HeapRegions marked as archive even if CDS mapping fails
Reviewed-by: tschatzl, jiangli


 735          "be called for humongous allocation requests");
 736 
 737   HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
 738 
 739   if (result == NULL) {
 740     *actual_word_size = desired_word_size;
 741     result = attempt_allocation_slow(desired_word_size);
 742   }
 743 
 744   assert_heap_not_locked();
 745   if (result != NULL) {
 746     assert(*actual_word_size != 0, "Actual size must have been set here");
 747     dirty_young_block(result, *actual_word_size);
 748   } else {
 749     *actual_word_size = 0;
 750   }
 751 
 752   return result;
 753 }
 754 
 755 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
 756   assert(!is_init_completed(), "Expect to be called at JVM init time");
 757   assert(ranges != NULL, "MemRegion array NULL");
 758   assert(count != 0, "No MemRegions provided");
 759   MemRegion reserved = _hrm.reserved();
 760   HeapWord* prev_last_addr = NULL;
 761   HeapRegion* prev_last_region = NULL;
 762   size_t size_used = 0;
 763   size_t uncommitted_regions = 0;
 764 
 765   // For each MemRegion, free the G1 regions that constitute it, and
 766   // notify mark-sweep that the range is no longer to be considered 'archive.'
 767   MutexLockerEx x(Heap_lock);
 768   for (size_t i = 0; i < count; i++) {
 769     HeapWord* start_address = ranges[i].start();
 770     HeapWord* last_address = ranges[i].last();
 771 
 772     assert(reserved.contains(start_address) && reserved.contains(last_address),
 773            "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
 774            p2i(start_address), p2i(last_address));
 775     assert(start_address > prev_last_addr,


 797     // After verifying that each region was marked as an archive region by
 798     // alloc_archive_regions, set it free and empty and uncommit it.
 799     HeapRegion* curr_region = start_region;
 800     while (curr_region != NULL) {
 801       guarantee(curr_region->is_archive(),
 802                 "Expected archive region at index %u", curr_region->hrm_index());
 803       uint curr_index = curr_region->hrm_index();
 804       _old_set.remove(curr_region);
 805       curr_region->set_free();
 806       curr_region->set_top(curr_region->bottom());
 807       if (curr_region != last_region) {
 808         curr_region = _hrm.next_region_in_heap(curr_region);
 809       } else {
 810         curr_region = NULL;
 811       }
 812       _hrm.shrink_at(curr_index, 1);
 813       uncommitted_regions++;
 814     }
 815 
 816     // Notify mark-sweep that this is no longer an archive range.
 817     G1ArchiveAllocator::set_range_archive(ranges[i], false);
 818   }
 819 
 820   if (uncommitted_regions != 0) {
 821     log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
 822                               HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
 823   }
 824   decrease_used(size_used);
 825 }
 826 
 827 oop G1CollectedHeap::materialize_archived_object(oop obj) {
 828   assert(obj != NULL, "archived obj is NULL");
 829   assert(MetaspaceShared::is_archive_object(obj), "must be archived object");
 830 
 831   // Loading an archived object makes it strongly reachable. If it is
 832   // loaded during concurrent marking, it must be enqueued to the SATB
 833   // queue, shading the previously white object gray.
 834   G1BarrierSet::enqueue(obj);
 835 
 836   return obj;
 837 }




 735          "be called for humongous allocation requests");
 736 
 737   HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
 738 
 739   if (result == NULL) {
 740     *actual_word_size = desired_word_size;
 741     result = attempt_allocation_slow(desired_word_size);
 742   }
 743 
 744   assert_heap_not_locked();
 745   if (result != NULL) {
 746     assert(*actual_word_size != 0, "Actual size must have been set here");
 747     dirty_young_block(result, *actual_word_size);
 748   } else {
 749     *actual_word_size = 0;
 750   }
 751 
 752   return result;
 753 }
 754 
 755 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count, bool is_open) {
 756   assert(!is_init_completed(), "Expect to be called at JVM init time");
 757   assert(ranges != NULL, "MemRegion array NULL");
 758   assert(count != 0, "No MemRegions provided");
 759   MemRegion reserved = _hrm.reserved();
 760   HeapWord* prev_last_addr = NULL;
 761   HeapRegion* prev_last_region = NULL;
 762   size_t size_used = 0;
 763   size_t uncommitted_regions = 0;
 764 
 765   // For each MemRegion, free the G1 regions that constitute it, and
 766   // notify mark-sweep that the range is no longer to be considered 'archive.'
 767   MutexLockerEx x(Heap_lock);
 768   for (size_t i = 0; i < count; i++) {
 769     HeapWord* start_address = ranges[i].start();
 770     HeapWord* last_address = ranges[i].last();
 771 
 772     assert(reserved.contains(start_address) && reserved.contains(last_address),
 773            "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
 774            p2i(start_address), p2i(last_address));
 775     assert(start_address > prev_last_addr,


 797     // After verifying that each region was marked as an archive region by
 798     // alloc_archive_regions, set it free and empty and uncommit it.
 799     HeapRegion* curr_region = start_region;
 800     while (curr_region != NULL) {
 801       guarantee(curr_region->is_archive(),
 802                 "Expected archive region at index %u", curr_region->hrm_index());
 803       uint curr_index = curr_region->hrm_index();
 804       _old_set.remove(curr_region);
 805       curr_region->set_free();
 806       curr_region->set_top(curr_region->bottom());
 807       if (curr_region != last_region) {
 808         curr_region = _hrm.next_region_in_heap(curr_region);
 809       } else {
 810         curr_region = NULL;
 811       }
 812       _hrm.shrink_at(curr_index, 1);
 813       uncommitted_regions++;
 814     }
 815 
 816     // Notify mark-sweep that this is no longer an archive range.
 817     G1ArchiveAllocator::clear_range_archive(ranges[i], is_open);
 818   }
 819 
 820   if (uncommitted_regions != 0) {
 821     log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
 822                               HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
 823   }
 824   decrease_used(size_used);
 825 }
 826 
 827 oop G1CollectedHeap::materialize_archived_object(oop obj) {
 828   assert(obj != NULL, "archived obj is NULL");
 829   assert(MetaspaceShared::is_archive_object(obj), "must be archived object");
 830 
 831   // Loading an archived object makes it strongly reachable. If it is
 832   // loaded during concurrent marking, it must be enqueued to the SATB
 833   // queue, shading the previously white object gray.
 834   G1BarrierSet::enqueue(obj);
 835 
 836   return obj;
 837 }


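materialize_archived_object, unchanged by this patch, ends both listings: it calls G1BarrierSet::enqueue(obj) so that an archived object loaded while concurrent marking is running is shaded gray instead of staying white and being missed. A toy model of that SATB handshake follows; the SatbModel type and everything in it are invented for illustration and only mimic the enqueue-then-drain idea, not G1's actual barrier or marking code.

// Standalone C++ sketch (illustrative only) of the SATB idea: an object
// made reachable during concurrent marking is queued so the marker
// shades it gray rather than leaving it white.
#include <cassert>
#include <deque>
#include <unordered_set>

struct SatbModel {
  bool marking_active = false;
  std::deque<const void*> satb_queue;      // pending gray objects
  std::unordered_set<const void*> marked;  // objects no longer white

  // Loose analogue of the G1BarrierSet::enqueue(obj) call above.
  void enqueue(const void* obj) {
    if (marking_active) satb_queue.push_back(obj);
  }
  // Marker side: drain the queue, shading each queued object.
  void drain() {
    while (!satb_queue.empty()) {
      marked.insert(satb_queue.front());
      satb_queue.pop_front();
    }
  }
};

int main() {
  SatbModel satb;
  satb.marking_active = true;
  int archived_obj = 0;           // stands in for a mapped archive oop
  satb.enqueue(&archived_obj);    // materialize: shade it gray
  satb.drain();
  assert(satb.marked.count(&archived_obj) == 1);  // treated as live
  return 0;
}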