< prev index next >

src/share/vm/gc/g1/heapRegionManager.hpp

Print this page




 133                     _free_list("Free list", new MasterFreeRegionListMtSafeChecker())
 134   { }
 135 
       // Wire up the backing storage mappers for the heap itself and its
       // per-region side data structures (prev/next mark bitmaps, block
       // offset table, card table, card counts).
       // NOTE(review): commit/uncommit semantics are defined in the .cpp,
       // which is not visible here — confirm there before relying on them.
 136   void initialize(G1RegionToSpaceMapper* heap_storage,
 137                   G1RegionToSpaceMapper* prev_bitmap,
 138                   G1RegionToSpaceMapper* next_bitmap,
 139                   G1RegionToSpaceMapper* bot,
 140                   G1RegionToSpaceMapper* cardtable,
 141                   G1RegionToSpaceMapper* card_counts);
 142 
 143   // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
 144   // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
 145   // the heap from the lowest address, this region (and its associated data
 146   // structures) are available and we do not need to check further.
       // NOTE(review): new_heap_region(0) looks like it allocates a fresh
       // HeapRegion on every call — confirm the caller owns/frees it.
 147   HeapRegion* get_dummy_region() { return new_heap_region(0); }
 148 
 149   // Return the HeapRegion at the given index. Assume that the index
 150   // is valid.
 151   inline HeapRegion* at(uint index) const;
 152 



 153   // If addr is within the committed space return its corresponding
 154   // HeapRegion, otherwise return NULL.
 155   inline HeapRegion* addr_to_region(HeapWord* addr) const;
 156 
 157   // Insert the given region into the free region list.
 158   inline void insert_into_free_list(HeapRegion* hr);
 159 
 160   // Insert the given region list into the global free region list.
       // NOTE(review): add_ordered presumably keeps _free_list sorted by region
       // index and drains 'list' — verify against FreeRegionList::add_ordered.
 161   void insert_list_into_free_list(FreeRegionList* list) {
 162     _free_list.add_ordered(list);
 163   }
 164 
       // Take one region off the master free list; returns NULL if none is
       // available. The asserts check the returned region is unlinked and its
       // index is committed (is_available).
       // NOTE(review): 'is_old' is forwarded to remove_region — presumably a
       // hint to pick a region suited for old-gen allocation; confirm there.
 165   HeapRegion* allocate_free_region(bool is_old) {
 166     HeapRegion* hr = _free_list.remove_region(is_old);
 167 
 168     if (hr != NULL) {
 169       assert(hr->next() == NULL, "Single region should not have next");
 170       assert(is_available(hr->hrm_index()), "Must be committed");
 171     }
 172     return hr;




 133                     _free_list("Free list", new MasterFreeRegionListMtSafeChecker())
 134   { }
 135 
       // Wire up the backing storage mappers for the heap itself and its
       // per-region side data structures (prev/next mark bitmaps, block
       // offset table, card table, card counts).
       // NOTE(review): commit/uncommit semantics are defined in the .cpp,
       // which is not visible here — confirm there before relying on them.
 136   void initialize(G1RegionToSpaceMapper* heap_storage,
 137                   G1RegionToSpaceMapper* prev_bitmap,
 138                   G1RegionToSpaceMapper* next_bitmap,
 139                   G1RegionToSpaceMapper* bot,
 140                   G1RegionToSpaceMapper* cardtable,
 141                   G1RegionToSpaceMapper* card_counts);
 142 
 143   // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
 144   // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
 145   // the heap from the lowest address, this region (and its associated data
 146   // structures) are available and we do not need to check further.
       // NOTE(review): new_heap_region(0) looks like it allocates a fresh
       // HeapRegion on every call — confirm the caller owns/frees it.
 147   HeapRegion* get_dummy_region() { return new_heap_region(0); }
 148 
 149   // Return the HeapRegion at the given index. Assume that the index
 150   // is valid.
 151   inline HeapRegion* at(uint index) const;
 152 
 153   // Return the next region (by index) if that region is also humongous, NULL otherwise.
 154   inline HeapRegion* next_humongous_region(HeapRegion* hr) const;
 155 
 156   // If addr is within the committed space return its corresponding
 157   // HeapRegion, otherwise return NULL.
 158   inline HeapRegion* addr_to_region(HeapWord* addr) const;
 159 
 160   // Insert the given region into the free region list.
 161   inline void insert_into_free_list(HeapRegion* hr);
 162 
 163   // Insert the given region list into the global free region list.
       // NOTE(review): add_ordered presumably keeps _free_list sorted by region
       // index and drains 'list' — verify against FreeRegionList::add_ordered.
 164   void insert_list_into_free_list(FreeRegionList* list) {
 165     _free_list.add_ordered(list);
 166   }
 167 
       // Take one region off the master free list; returns NULL if none is
       // available. The asserts check the returned region is unlinked and its
       // index is committed (is_available).
       // NOTE(review): 'is_old' is forwarded to remove_region — presumably a
       // hint to pick a region suited for old-gen allocation; confirm there.
 168   HeapRegion* allocate_free_region(bool is_old) {
 169     HeapRegion* hr = _free_list.remove_region(is_old);
 170 
 171     if (hr != NULL) {
 172       assert(hr->next() == NULL, "Single region should not have next");
 173       assert(is_available(hr->hrm_index()), "Must be committed");
 174     }
 175     return hr;


< prev index next >