
src/hotspot/share/gc/g1/heapRegionManager.cpp

rev 56448 : imported patch 8220310.mut.0
rev 56449 : imported patch 8220310.mut.1
rev 56450 : imported patch 8220310.mut.2
rev 56451 : imported patch 8220310.mut.3
rev 56452 : [mq]: 8220310.mut.4


  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1Arguments.hpp"
  27 #include "gc/g1/g1CollectedHeap.inline.hpp"
  28 #include "gc/g1/g1ConcurrentRefine.hpp"
  29 #include "gc/g1/heapRegion.hpp"
  30 #include "gc/g1/heapRegionManager.inline.hpp"
  31 #include "gc/g1/heapRegionSet.inline.hpp"
  32 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
  33 #include "logging/logStream.hpp"
  34 #include "memory/allocation.hpp"
  35 #include "utilities/bitMap.inline.hpp"
  36 
  37 class MasterFreeRegionListChecker : public HeapRegionSetChecker {
  38 public:
  39   void check_mt_safety() {
  40     // Master Free List MT safety protocol:
  41     // (a) If we're at a safepoint, operations on the master free list
  42     // should be invoked by either the VM thread (which will serialize
  43     // them) or by the GC workers while holding the
  44     // FreeList_lock.
  45     // (b) If we're not at a safepoint, operations on the master free
  46     // list should be invoked while holding the Heap_lock.
  47 
  48     if (SafepointSynchronize::is_at_safepoint()) {
  49       guarantee(Thread::current()->is_VM_thread() ||
  50                 FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
  51     } else {
  52       guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
  53     }


  87   _heap_mapper = heap_storage;
  88 
  89   _prev_bitmap_mapper = prev_bitmap;
  90   _next_bitmap_mapper = next_bitmap;
  91 
  92   _bot_mapper = bot;
  93   _cardtable_mapper = cardtable;
  94 
  95   _card_counts_mapper = card_counts;
  96 
  97   MemRegion reserved = heap_storage->reserved();
  98   _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
  99 
 100   _available_map.initialize(_regions.length());
 101 }
 102 
 103 bool HeapRegionManager::is_available(uint region) const {
 104   return _available_map.at(region);
 105 }
 106 
 107 HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
 108   HeapRegion* hr = NULL;
 109   bool from_head = !type.is_young();
 110 
 111   if (requested_node_index != G1NUMA::AnyNodeIndex && G1NUMA::numa()->is_enabled()) {
 112     // Try to allocate with requested node index.
 113     hr = _free_list.remove_region_with_node_index(from_head, requested_node_index, NULL);
 114   }
 115 
 116   if (hr == NULL) {
 117     // If there is only a single active node, or we did not get a region from the
 118     // requested node, retry without a requested node index.
 119     hr = _free_list.remove_region(from_head);
 120   }
 121 
 122   if (hr != NULL) {
 123     assert(hr->next() == NULL, "Single region should not have next");
 124     assert(is_available(hr->hrm_index()), "Must be committed");
 125   }
 126 
 127   return hr;
 128 }
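
A minimal usage sketch for the new NUMA-aware entry point (hypothetical caller; HeapRegionType::Eden is the existing static type tag, and 1 is an arbitrary node index):

    // Sketch: prefer node 1 for a young region; the method itself already
    // falls back to any node when the preferred free list has nothing.
    HeapRegion* hr = _hrm->allocate_free_region(HeapRegionType::Eden, 1);
    if (hr == NULL) {
      // Free lists exhausted on every node: caller must expand or fail.
    }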
 129 
 130 #ifdef ASSERT
 131 bool HeapRegionManager::is_free(HeapRegion* hr) const {
 132   return _free_list.contains(hr);
 133 }
 134 #endif
 135 
 136 HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
 137   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 138   HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
 139   MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
 140   assert(reserved().contains(mr), "invariant");
 141   return g1h->new_heap_region(hrm_index, mr);
 142 }
 143 
 144 void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkGang* pretouch_gang) {
 145   guarantee(num_regions > 0, "Must commit more than zero regions");
 146   guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");
 147 
 148   _num_committed += (uint)num_regions;
 149 
 150   _heap_mapper->commit_regions(index, num_regions, pretouch_gang);
 151 
 152   // Also commit auxiliary data
 153   _prev_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);
 154   _next_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);
 155 
 156   _bot_mapper->commit_regions(index, num_regions, pretouch_gang);
 157   _cardtable_mapper->commit_regions(index, num_regions, pretouch_gang);
 158 
 159   _card_counts_mapper->commit_regions(index, num_regions, pretouch_gang);
 160 }
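
commit_regions keeps _num_committed in lock-step with the mappers and the availability bitmap. A debug-only cross-check one could add (hypothetical helper; BitMap::count_one_bits() is an existing utility, and the check only holds at points where no commit is in flight):

    #ifdef ASSERT
    void HeapRegionManager::verify_committed_count() const {
      // Each set bit in _available_map corresponds to one committed region.
      assert(_num_committed == (uint)_available_map.count_one_bits(),
             "committed count %u does not match available map", _num_committed);
    }
    #endif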
 161 
 162 void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
 163   guarantee(num_regions >= 1, "Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start);
 164   guarantee(_num_committed >= num_regions, "pre-condition");
 165 
 166   // Reset the node index to distinguish uncommitted regions from committed ones.
 167   for (uint i = start; i < start + num_regions; i++) {
 168     at(i)->set_node_index(G1NUMA::UnknownNodeIndex);
 169   }
 170 
 171   // Print before uncommitting.
 172   if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
 173     for (uint i = start; i < start + num_regions; i++) {
 174       HeapRegion* hr = at(i);
 175       G1CollectedHeap::heap()->hr_printer()->uncommit(hr);
 176     }
 177   }
 178 
 179   _num_committed -= (uint)num_regions;
 180 
 181   _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
 182   _heap_mapper->uncommit_regions(start, num_regions);
 183 
 184   // Also uncommit auxiliary data
 185   _prev_bitmap_mapper->uncommit_regions(start, num_regions);
 186   _next_bitmap_mapper->uncommit_regions(start, num_regions);
 187 
 188   _bot_mapper->uncommit_regions(start, num_regions);
 189   _cardtable_mapper->uncommit_regions(start, num_regions);
 190 


 198     if (_regions.get_by_index(i) == NULL) {
 199       HeapRegion* new_hr = new_heap_region(i);
 200       OrderAccess::storestore();
 201       _regions.set_by_index(i, new_hr);
 202       _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
 203     }
 204   }
 205 
 206   _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
 207 
 208   for (uint i = start; i < start + num_regions; i++) {
 209     assert(is_available(i), "Just made region %u available but is apparently not.", i);
 210     HeapRegion* hr = at(i);
 211     if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
 212       G1CollectedHeap::heap()->hr_printer()->commit(hr);
 213     }
 214     HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
 215     MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
 216 
 217     hr->initialize(mr);
 218     hr->set_node_index(G1NUMA::numa()->index_for_region(hr));
 219     insert_into_free_list(at(i));
 220   }
 221 }
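
The OrderAccess::storestore() in the loop above is a publication fence: it keeps the stores that construct the HeapRegion ordered before the store that makes the region reachable through _regions. The same three lines, annotated:

    HeapRegion* new_hr = new_heap_region(i);  // 1. fully construct the region
    OrderAccess::storestore();                // 2. order constructor stores before
                                              //    the publishing store below
    _regions.set_by_index(i, new_hr);         // 3. publish; a concurrent reader that
                                              //    sees the pointer sees initialized fields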
 222 
 223 MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
 224   size_t used_sz =
 225     _prev_bitmap_mapper->committed_size() +
 226     _next_bitmap_mapper->committed_size() +
 227     _bot_mapper->committed_size() +
 228     _cardtable_mapper->committed_size() +
 229     _card_counts_mapper->committed_size();
 230 
 231   size_t committed_sz =
 232     _prev_bitmap_mapper->reserved_size() +
 233     _next_bitmap_mapper->reserved_size() +
 234     _bot_mapper->reserved_size() +
 235     _cardtable_mapper->reserved_size() +
 236     _card_counts_mapper->reserved_size();
 237 
 238   return MemoryUsage(0, used_sz, committed_sz, committed_sz);
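
Note the mapping here: the MemoryUsage "used" figure sums what the auxiliary mappers have actually committed, while "committed" and "max" sum the full reservations. With made-up sizes for illustration:

    // Hypothetical: each of the five mappers has 1 MB committed of 4 MB reserved.
    // used_sz      = 5 * 1 MB =  5 MB   (sum of committed_size())
    // committed_sz = 5 * 4 MB = 20 MB   (sum of reserved_size())
    // MemoryUsage(init = 0, used = 5 MB, committed = 20 MB, max = 20 MB)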


 246   if (num_regions == 0) {
 247     return 0;
 248   }
 249 
 250   uint cur = start;
 251   uint idx_last_found = 0;
 252   uint num_last_found = 0;
 253 
 254   uint expanded = 0;
 255 
 256   while (expanded < num_regions &&
 257          (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
 258     uint to_expand = MIN2(num_regions - expanded, num_last_found);
 259     make_regions_available(idx_last_found, to_expand, pretouch_workers);
 260     expanded += to_expand;
 261     cur = idx_last_found + num_last_found + 1;
 262   }
 263 
 264   verify_optional();
 265   return expanded;
 266 }
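
A worked trace of the expansion loop, using a hypothetical eight-region heap:

    // available map: 1 1 0 0 1 0 0 0   (regions 2, 3, 5, 6, 7 uncommitted)
    // expand_at(0, 3):
    //   find_unavailable_from_idx(0) -> idx 2, run of 2; commit 2-3, expanded = 2
    //   cur = 2 + 2 + 1 = 5           (region 4 ended the run, so it is available)
    //   find_unavailable_from_idx(5) -> idx 5, run of 3; commit 5 only, expanded = 3
    //   expanded == num_regions, loop exits; returns 3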
 267 
 268 uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) {
 269   uint expand_candidate = UINT_MAX;
 270   for (uint i = 0; i < max_length(); i++) {
 271     if (is_available(i)) {
 272       // Already in use, continue to the next region.
 273       continue;
 274     }
 275     // Always save the candidate so we can expand later on.
 276     expand_candidate = i;
 277     if (is_on_preferred_index(expand_candidate, preferred_index)) {
 278       // We have found a candidate on the preferred node, break.
 279       break;
 280     }
 281   }
 282 
 283   if (expand_candidate == UINT_MAX) {
 284     // No regions left, expand failed.
 285     return 0;
 286   }
 287 
 288   make_regions_available(expand_candidate, 1, NULL);
 289   return 1;
 290 }
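
A hedged caller sketch (index_of_current_thread() is assumed from the G1NUMA API introduced by this patch series):

    // Sketch: grow by one region, preferring the allocating thread's NUMA node.
    uint node = G1NUMA::numa()->index_of_current_thread();
    if (_hrm->expand_on_preferred_node(node) == 0) {
      // Nothing left to commit on any node; expansion failed outright.
    }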
 291 
 292 bool HeapRegionManager::is_on_preferred_index(uint region_index, uint preferred_node_index) {
 293   uint region_node_index = G1NUMA::numa()->preferred_node_index_for_index(region_index);
 294   return region_node_index == preferred_node_index;
 295 }
 296 
 297 uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
 298   uint found = 0;
 299   size_t length_found = 0;
 300   uint cur = 0;
 301 
 302   while (length_found < num && cur < max_length()) {
 303     HeapRegion* hr = _regions.get_by_index(cur);
 304     if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
 305       // This region is a potential candidate for allocation into.
 306       length_found++;
 307     } else {
 308       // This region is not a candidate. The next region is the next possible one.
 309       found = cur + 1;
 310       length_found = 0;
 311     }
 312     cur++;
 313   }
 314 
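
The candidate test in the loop admits exactly two kinds of region; restated:

    // A region at index cur extends the current run iff either
    //   !empty_only && !is_available(cur)                  -- uncommitted; can be
    //                                                         committed on demand, or
    //   is_available(cur) && hr != NULL && hr->is_empty()  -- committed but empty.
    // Any other region (committed and in use) resets the run: found = cur + 1,
    // length_found = 0, and the scan resumes at the next index.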

