src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 6673 : 8048112: G1 Full GC needs to support the case when the very first region is not available
Summary: To allow uncommit of regions within the heap, G1 Full GC must correctly handle the case when the very first region is not available (uncommitted). Support this by lazily initializing the compaction point during iteration over the list of heap regions (see the sketch below), and refactor the code so that G1CollectedHeap handles finding the next region to compact into.
Reviewed-by:
rev 6674 : imported patch fixes-kim
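
A minimal sketch of the lazy initialization the summary describes; this is illustrative, not the patch itself. The closure name, the is_cp_initialized() helper, and default-constructing a CompactPoint are assumptions here, taking CompactPoint to be the struct that records the target space and its allocation threshold. The point is that the compaction point is set up only when the first usable region is actually visited, instead of being aimed eagerly at region 0, which may be uncommitted.

    // Hypothetical sketch of lazily initializing the compaction point.
    class LazyPrepareCompactClosure : public HeapRegionClosure {
      CompactPoint _cp;   // destination for compacted objects

      bool is_cp_initialized() const { return _cp.space != NULL; }

    public:
      bool doHeapRegion(HeapRegion* hr) {
        if (!hr->isHumongous()) {
          if (!is_cp_initialized()) {
            // First compactible region we come across: initialize the
            // compaction point here rather than assuming region 0 exists.
            _cp.space = hr;
            _cp.threshold = hr->initialize_threshold();
          }
          hr->prepare_for_compaction(&_cp);
        }
        return false;  // never abort the iteration
      }
    };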


Old version:

2936     if (cl->doHeapRegion(cur) && false) {
2937       cl->incomplete();
2938       return;
2939     }
2940     cur = next;
2941   }
2942   cur = g1_policy()->collection_set();
2943   while (cur != r) {
2944     HeapRegion* next = cur->next_in_collection_set();
2945     if (cl->doHeapRegion(cur) && false) {
2946       cl->incomplete();
2947       return;
2948     }
2949     cur = next;
2950   }
2951 }
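
For orientation: returning true from doHeapRegion() is the HeapRegionClosure protocol for requesting early termination, and incomplete() records that the walk did not finish. Note that the '&& false' above disables the early exit as written, so these loops always visit the entire collection set. A minimal closure that would exercise the protocol, with a hypothetical predicate:

    // Sketch: a closure that asks to stop at the first non-empty region.
    class FindUsedRegionClosure : public HeapRegionClosure {
    public:
      bool doHeapRegion(HeapRegion* r) {
        return r->used() > 0;   // true == terminate the iteration
      }
    };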
2952 
2953 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2954   // We're not using an iterator given that it will wrap around when
2955   // it reaches the last region and this is not what we want here.
2956   uint index = from->hrs_index() + 1;
2957   while (index < n_regions()) {
2958     HeapRegion* hr = region_at(index);
2959     if (!hr->isHumongous()) {
2960       return hr;
2961     }
2962     index += 1;
2963   }
2964   return NULL;
2965 }
2966 
2967 Space* G1CollectedHeap::space_containing(const void* addr) const {
2968   return heap_region_containing(addr);
2969 }
2970 
2971 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2972   Space* sp = space_containing(addr);
2973   return sp->block_start(addr);
2974 }
2975 
2976 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2977   Space* sp = space_containing(addr);
2978   return sp->block_size(addr);
2979 }
2980 
2981 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2982   Space* sp = space_containing(addr);


6596                            HeapRegionSet* old_set, FreeRegionList* free_list) :
6597     _free_list_only(free_list_only),
6598     _old_set(old_set), _free_list(free_list), _total_used(0) {
6599     assert(_free_list->is_empty(), "pre-condition");
6600     if (!free_list_only) {
6601       assert(_old_set->is_empty(), "pre-condition");
6602     }
6603   }
6604 
6605   bool doHeapRegion(HeapRegion* r) {
6606     if (r->continuesHumongous()) {
6607       return false;
6608     }
6609 
6610     if (r->is_empty()) {
6611       // Add free regions to the free list
6612       _free_list->add_as_tail(r);
6613     } else if (!_free_list_only) {
6614       assert(!r->is_young(), "we should not come across young regions");
6615 
6616       if (r->startsHumongous()) {
6617         // We ignore humongous regions; we left the humongous set unchanged
6618       } else if (r->continuesHumongous()) {
6619       } else {
6620         // The rest should be old; add them to the old set
6621         _old_set->add(r);
6622       }
6623       _total_used += r->used();
6624     }
6625 
6626     return false;
6627   }
6628 
6629   size_t total_used() {
6630     return _total_used;
6631   }
6632 };
6633 
6634 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6635   assert_at_safepoint(true /* should_be_vm_thread */);
6636 
6637   if (!free_list_only) {
6638     _young_list->empty_list();




New version:

2936     if (cl->doHeapRegion(cur) && false) {
2937       cl->incomplete();
2938       return;
2939     }
2940     cur = next;
2941   }
2942   cur = g1_policy()->collection_set();
2943   while (cur != r) {
2944     HeapRegion* next = cur->next_in_collection_set();
2945     if (cl->doHeapRegion(cur) && false) {
2946       cl->incomplete();
2947       return;
2948     }
2949     cur = next;
2950   }
2951 }
2952 
2953 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2954   // We're not using an iterator given that it will wrap around when
2955   // it reaches the last region and this is not what we want here.
2956     for (uint index = from->hrs_index() + 1; index < n_regions(); index++) {
2957     HeapRegion* hr = region_at(index);
2958     if (!hr->isHumongous()) {
2959       return hr;
2960     }
2961   }
2962   return NULL;
2963 }
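
The refactor described in the summary, letting G1CollectedHeap find the next region to compact into, presumably hooks this helper in behind HeapRegion's generic compaction interface. A minimal sketch of that delegation, assuming next_compaction_space() is the CompactibleSpace virtual being overridden:

    // Sketch: the region defers to the heap instead of scanning the
    // region sequence itself.
    CompactibleSpace* HeapRegion::next_compaction_space() const {
      return G1CollectedHeap::heap()->next_compaction_region(this);
    }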
2964 
2965 Space* G1CollectedHeap::space_containing(const void* addr) const {
2966   return heap_region_containing(addr);
2967 }
2968 
2969 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2970   Space* sp = space_containing(addr);
2971   return sp->block_start(addr);
2972 }
2973 
2974 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2975   Space* sp = space_containing(addr);
2976   return sp->block_size(addr);
2977 }
2978 
2979 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2980   Space* sp = space_containing(addr);


6594                            HeapRegionSet* old_set, FreeRegionList* free_list) :
6595     _free_list_only(free_list_only),
6596     _old_set(old_set), _free_list(free_list), _total_used(0) {
6597     assert(_free_list->is_empty(), "pre-condition");
6598     if (!free_list_only) {
6599       assert(_old_set->is_empty(), "pre-condition");
6600     }
6601   }
6602 
6603   bool doHeapRegion(HeapRegion* r) {
6604     if (r->continuesHumongous()) {
6605       return false;
6606     }
6607 
6608     if (r->is_empty()) {
6609       // Add free regions to the free list
6610       _free_list->add_as_tail(r);
6611     } else if (!_free_list_only) {
6612       assert(!r->is_young(), "we should not come across young regions");
6613 
6614       if (r->isHumongous()) {
6615         // We ignore humongous regions; we left the humongous set unchanged
6616       } else {
6617         // The rest should be old; add them to the old set
6618         _old_set->add(r);
6619       }
6620       _total_used += r->used();
6621     }
6622 
6623     return false;
6624   }
6625 
6626   size_t total_used() {
6627     return _total_used;
6628   }
6629 };
6630 
6631 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6632   assert_at_safepoint(true /* should_be_vm_thread */);
6633 
6634   if (!free_list_only) {
6635     _young_list->empty_list();
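
The hunk cuts off here. For orientation, a closure like the one above is typically applied by instantiating it and handing it to heap_region_iterate(), which visits every region exactly once. A minimal sketch of how the rebuild presumably continues, with RebuildRegionSetsClosure, _old_set, and _free_list as assumed names not visible in this excerpt:

    // Sketch: walk all regions once; the closure sorts each one into
    // the free list (if empty) or the old set (if old and non-humongous).
    RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
    heap_region_iterate(&cl);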