
src/share/vm/gc/g1/heapRegionManager.hpp





 133                     _free_list("Free list", new MasterFreeRegionListMtSafeChecker())
 134   { }
 135 
 136   void initialize(G1RegionToSpaceMapper* heap_storage,
 137                   G1RegionToSpaceMapper* prev_bitmap,
 138                   G1RegionToSpaceMapper* next_bitmap,
 139                   G1RegionToSpaceMapper* bot,
 140                   G1RegionToSpaceMapper* cardtable,
 141                   G1RegionToSpaceMapper* card_counts);
 142 
 143   // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
 144   // new HeapRegion that owns the HeapRegion at index 0. Since we currently commit
 145   // the heap from the lowest address, this region and its associated data
 146   // structures are always available, and we do not need to check further.
 147   HeapRegion* get_dummy_region() { return new_heap_region(0); }
 148 
 149   // Return the HeapRegion at the given index. Assume that the index
 150   // is valid.
 151   inline HeapRegion* at(uint index) const;
 152 



 153   // If addr is within the committed space return its corresponding
 154   // HeapRegion, otherwise return NULL.
 155   inline HeapRegion* addr_to_region(HeapWord* addr) const;
 156 




 157   // Insert the given region into the free region list.
 158   inline void insert_into_free_list(HeapRegion* hr);
 159 
 160   // Insert the given region list into the global free region list.
 161   void insert_list_into_free_list(FreeRegionList* list) {
 162     _free_list.add_ordered(list);
 163   }
 164 
 165   HeapRegion* allocate_free_region(bool is_old) {
 166     HeapRegion* hr = _free_list.remove_region(is_old);
 167 
 168     if (hr != NULL) {
 169       assert(hr->next() == NULL, "Single region should not have next");
 170       assert(is_available(hr->hrm_index()), "Must be committed");
 171     }
 172     return hr;
 173   }
 174 
 175   inline void allocate_free_regions_starting_at(uint first, uint num_regions);
 176 


 203 
 204   // Expand the sequence to reflect that the heap has grown. Either create new
 205   // HeapRegions, or re-use existing ones. Returns the number of regions the
 206   // sequence was expanded by. If a HeapRegion allocation fails, the resulting
 207   // number of regions might be smaller than what's desired.
 208   uint expand_by(uint num_regions);
 209 
 210   // Makes sure that the regions from start to start+num_regions-1 are available
 211   // for allocation. Returns the number of regions that were committed to achieve
 212   // this.
 213   uint expand_at(uint start, uint num_regions);
 214 
 215   // Find a contiguous set of empty regions of length num. Returns the start index of
 216   // that set, or G1_NO_HRM_INDEX.
 217   uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
 218   // Find a contiguous set of empty or unavailable regions of length num. Returns the
 219   // start index of that set, or G1_NO_HRM_INDEX.
 220   uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
 221 
 222   HeapRegion* next_region_in_heap(const HeapRegion* r) const;



 223 
 224   // Apply blk->doHeapRegion() on all committed regions in address order,
 225   // terminating the iteration early if doHeapRegion() returns true.
 226   void iterate(HeapRegionClosure* blk) const;
 227 
 228   void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const;
 229 
 230   // Uncommit up to num_regions_to_remove regions that are completely free.
 231   // Return the actual number of uncommitted regions.
 232   uint shrink_by(uint num_regions_to_remove);
 233 
 234   void verify();
 235 
 236   // Do some sanity checking.
 237   void verify_optional() PRODUCT_RETURN;
 238 };
 239 
 240 // The HeapRegionClaimer is used during parallel iteration over heap regions,
 241 // allowing workers to claim regions and thereby gain exclusive rights to them.
 242 class HeapRegionClaimer : public StackObj {




 133                     _free_list("Free list", new MasterFreeRegionListMtSafeChecker())
 134   { }
 135 
 136   void initialize(G1RegionToSpaceMapper* heap_storage,
 137                   G1RegionToSpaceMapper* prev_bitmap,
 138                   G1RegionToSpaceMapper* next_bitmap,
 139                   G1RegionToSpaceMapper* bot,
 140                   G1RegionToSpaceMapper* cardtable,
 141                   G1RegionToSpaceMapper* card_counts);
 142 
 143   // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
 144   // new HeapRegion that owns the HeapRegion at index 0. Since we currently commit
 145   // the heap from the lowest address, this region and its associated data
 146   // structures are always available, and we do not need to check further.
 147   HeapRegion* get_dummy_region() { return new_heap_region(0); }
 148 
 149   // Return the HeapRegion at the given index. Assume that the index
 150   // is valid.
 151   inline HeapRegion* at(uint index) const;
 152 
 153   // Return the HeapRegion at the given index, or NULL.
 154   inline HeapRegion* at_or_null(uint index) const;
 155 
 156   // If addr is within the committed space return its corresponding
 157   // HeapRegion, otherwise return NULL.
 158   inline HeapRegion* addr_to_region(HeapWord* addr) const;
 159 
 160   // If addr is within the maximum heap space, return its corresponding
 161   // HeapRegion index.
 162   inline uint addr_to_index(HeapWord* addr) const;
 163 
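A minimal sketch of how the two new accessors compose (the helper name below is invented for illustration and is not part of this change):

  // Hypothetical helper: map an address to its HeapRegion without requiring
  // the slot to be committed. addr_to_index() only needs addr to lie within
  // the reserved heap; at_or_null() then returns NULL for uncommitted slots.
  HeapRegion* example_region_for(HeapRegionManager* hrm, HeapWord* addr) {
    uint index = hrm->addr_to_index(addr);
    return hrm->at_or_null(index);
  }

In contrast, at() above simply assumes the index is valid, and addr_to_region() returns NULL for addresses outside the committed space.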
 164   // Insert the given region into the free region list.
 165   inline void insert_into_free_list(HeapRegion* hr);
 166 
 167   // Insert the given region list into the global free region list.
 168   void insert_list_into_free_list(FreeRegionList* list) {
 169     _free_list.add_ordered(list);
 170   }
 171 
 172   HeapRegion* allocate_free_region(bool is_old) {
 173     HeapRegion* hr = _free_list.remove_region(is_old);
 174 
 175     if (hr != NULL) {
 176       assert(hr->next() == NULL, "Single region should not have next");
 177       assert(is_available(hr->hrm_index()), "Must be committed");
 178     }
 179     return hr;
 180   }
 181 
 182   inline void allocate_free_regions_starting_at(uint first, uint num_regions);
 183 
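To make the free-list interplay above concrete, here is a rough caller sketch (the helper name and the single-region retry policy are invented; error handling is omitted). It assumes, as the expand_at() comment below suggests, that newly committed regions become available on the free list:

  HeapRegion* example_take_region(HeapRegionManager* hrm) {
    HeapRegion* hr = hrm->allocate_free_region(false /* is_old */);
    if (hr == NULL && hrm->expand_by(1) == 1) {
      // One more region was committed and should now be on the free list.
      hr = hrm->allocate_free_region(false /* is_old */);
    }
    return hr;
  }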


 210 
 211   // Expand the sequence to reflect that the heap has grown. Either create new
 212   // HeapRegions, or re-use existing ones. Returns the number of regions the
 213   // sequence was expanded by. If a HeapRegion allocation fails, the resulting
 214   // number of regions might be smaller than what's desired.
 215   uint expand_by(uint num_regions);
 216 
 217   // Makes sure that the regions from start to start+num_regions-1 are available
 218   // for allocation. Returns the number of regions that were committed to achieve
 219   // this.
 220   uint expand_at(uint start, uint num_regions);
 221 
 222   // Find a contiguous set of empty regions of length num. Returns the start index of
 223   // that set, or G1_NO_HRM_INDEX.
 224   uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
 225   // Find a contiguous set of empty or unavailable regions of length num. Returns the
 226   // start index of that set, or G1_NO_HRM_INDEX.
 227   uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
 228 
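A sketch of how a caller might combine the two find_contiguous variants (the helper is invented and glosses over the partial-expansion case): prefer a run of already committed empty regions, otherwise commit the missing ones before taking the run off the free list.

  uint example_find_run(HeapRegionManager* hrm, size_t num) {
    uint first = hrm->find_contiguous_only_empty(num);
    if (first == G1_NO_HRM_INDEX) {
      first = hrm->find_contiguous_empty_or_unavailable(num);
      if (first != G1_NO_HRM_INDEX) {
        // Commit whatever part of the chosen range is still uncommitted.
        hrm->expand_at(first, (uint)num);
      }
    }
    if (first != G1_NO_HRM_INDEX) {
      // Remove the regions from the free list so they can be handed out.
      hrm->allocate_free_regions_starting_at(first, (uint)num);
    }
    return first;
  }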
 229   HeapRegion* next_region_in_heap(const HeapRegion* r) const;
 230 
 231   // Find the highest available empty or uncommitted region in the reserved heap.
 232   uint find_highest_available(bool* expanded);
 233 
 234   // Apply blk->doHeapRegion() on all committed regions in address order,
 235   // terminating the iteration early if doHeapRegion() returns true.
 236   void iterate(HeapRegionClosure* blk) const;
 237 
 238   void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const;
 239 
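The closure contract above is easy to get backwards, so a small sketch (the closure name is invented): doHeapRegion() returns false to continue and true to stop the iteration early.

  class CountCommittedClosure : public HeapRegionClosure {
    uint _count;
   public:
    CountCommittedClosure() : _count(0) { }
    bool doHeapRegion(HeapRegion* hr) {
      _count++;       // hr is a committed region, visited in address order
      return false;   // false => keep iterating
    }
    uint count() const { return _count; }
  };

  // CountCommittedClosure cl;
  // hrm->iterate(&cl);                                                   // serial iteration
  // hrm->par_iterate(&cl, worker_id, &claimer, false /* concurrent */);  // parallel, regions claimed via HeapRegionClaimer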
 240   // Uncommit up to num_regions_to_remove regions that are completely free.
 241   // Return the actual number of uncommitted regions.
 242   uint shrink_by(uint num_regions_to_remove);
 243 
 244   void verify();
 245 
 246   // Do some sanity checking.
 247   void verify_optional() PRODUCT_RETURN;
 248 };
 249 
 250 // The HeapRegionClaimer is used during parallel iteration over heap regions,
 251 // allowing workers to claim regions and thereby gain exclusive rights to them.
 252 class HeapRegionClaimer : public StackObj {

