1 /*
   2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1CollectedHeap.inline.hpp"
  27 #include "gc/g1/g1ConcurrentRefine.hpp"
  28 #include "gc/g1/heapRegion.hpp"
  29 #include "gc/g1/heapRegionManager.inline.hpp"
  30 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
  31 #include "gc/g1/heapRegionSet.inline.hpp"
  32 #include "memory/allocation.hpp"
  33 
  34 // expand_by() is called to grow the heap. We grow into nvdimm now.
  35 // Dram regions are committed later as needed during mutator region allocation or 
  36 // when young list target length is determined after gc cycle.
  37 uint HeterogeneousHeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) {
  38   uint num_expanded = expand_nvdimm(MIN2(num_regions, max_expandable_length() - total_regions_committed()), pretouch_workers);
  39   assert(total_regions_committed() <= max_expandable_length(), "must be");
  40   return num_expanded;
  41 }
  42 
  43 // Expands heap starting from 'start' index. The question is should we expand from one memory (e.g. nvdimm) to another (e.g. dram).
  44 // Looking at the code, expand_at() is called for humongous allocation where 'start' is in nv-dimm.
  45 // So we only allocate regions in the same kind of memory as 'start'.
  46 uint HeterogeneousHeapRegionManager::expand_at(uint start, uint num_regions, WorkGang* pretouch_workers) {
  47   if (num_regions == 0) {
  48     return 0;
  49   }
  50   uint target_num_regions = MIN2(num_regions, max_expandable_length() - total_regions_committed());
  51   uint end = is_in_nvdimm(start) ? end_index_of_nvdimm() : end_index_of_dram();
  52   uint num_expanded = expand_in_range(start, end, target_num_regions, pretouch_workers);
  53   assert(total_regions_committed() <= max_expandable_length(), "must be");
  54   return num_expanded;
  55 }
  56 
// This function ensures that there are 'expected_num_regions' committed regions in dram.
// If new regions are committed, it un-commits that many regions from nv-dimm.
// If there are already more regions committed in dram, extra regions are un-committed.
// Either way, the total number of committed regions is preserved (asserted below).
void HeterogeneousHeapRegionManager::adjust_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers) {

  assert(total_regions_committed() <= max_expandable_length(), "must be");
  if(expected_num_regions > free_list_dram_length()) {
    // If we are going to expand DRAM, we expand a little more so that we can absorb small variations in Young gen sizing.
    // Note: the double product is truncated back to uint on assignment.
    uint targeted_dram_regions = expected_num_regions * (1 + (double)G1YoungExpansionBufferPerc / 100);
    uint to_be_made_available = targeted_dram_regions - free_list_dram_length();

#ifdef ASSERT
    uint total_committed_before = total_regions_committed();
#endif
    // Trade regions one-for-one: uncommit from nv-dimm first, then commit the same
    // number in dram, so total commitment stays constant.
    uint can_be_made_available = shrink_nvdimm(to_be_made_available);
    uint ret = expand_dram(can_be_made_available, pretouch_workers);
#ifdef ASSERT
    assert(ret == can_be_made_available, "should be equal");
    assert(total_committed_before == total_regions_committed(), "invariant not met");
    assert(total_regions_committed() <= _max_regions, "post-condition");
#endif
  } else {
    uint to_be_released = free_list_dram_length() - expected_num_regions;
    // if number of extra DRAM regions is small, do not shrink.
    // (Hysteresis: avoids flip-flopping regions between memories on small variations.)
    if (to_be_released < expected_num_regions * G1YoungExpansionBufferPerc / 100) {
      return;
    }

#ifdef ASSERT
    uint total_committed_before = total_regions_committed();
#endif
    // Reverse trade: give dram regions back and re-commit the same count in nv-dimm.
    uint ret = shrink_dram(to_be_released);
    assert(ret == to_be_released, "Should be able to shrink by given amount");
    ret = expand_nvdimm(to_be_released, pretouch_workers);
#ifdef ASSERT
    assert(ret == to_be_released, "Should be able to expand by given amount");
    assert(total_committed_before == total_regions_committed(), "invariant not met");
    assert(total_regions_committed() <= _max_regions, "post-condition");
#endif
  }
  assert(total_regions_committed() <= max_expandable_length(), "must be");
}
  99 
 100 uint HeterogeneousHeapRegionManager::total_regions_committed() const {
 101   return num_committed_dram() + num_committed_nvdimm();
 102 }
 103 
 104 uint HeterogeneousHeapRegionManager::num_committed_dram() const {
 105   // This class does not keep count of committed regions in dram and nv-dimm.
 106   // G1RegionToHeteroSpaceMapper keeps this information.
 107   return static_cast<G1RegionToHeteroSpaceMapper*>(_heap_mapper)->num_committed_dram();
 108 }
 109 
 110 uint HeterogeneousHeapRegionManager::num_committed_nvdimm() const {
 111   // See comment for num_committed_dram()
 112   return static_cast<G1RegionToHeteroSpaceMapper*>(_heap_mapper)->num_committed_nvdimm();
 113 }
 114 
 115 // Return maximum number of regions that heap can expand to.
 116 uint HeterogeneousHeapRegionManager::max_expandable_length() const {
 117   return _max_regions;
 118 }
 119 
// Finds the first contiguous run of unavailable (not committed) regions within
// [start_idx, end_idx]. Stores the start of the run in *res_idx and returns its
// length; returns 0 (leaving *res_idx untouched) when every region in the range
// is already available.
uint HeterogeneousHeapRegionManager::find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const {
  guarantee(res_idx != NULL, "checking");
  guarantee(start_idx <= (max_length() + 1), "checking");

  uint num_regions = 0;

  uint cur = start_idx;
  // Skip the leading run of available regions.
  while (cur <= end_idx && is_available(cur)) {
    cur++;
  }
  // The whole range was available - nothing to report.
  if (cur == end_idx + 1) {
    return num_regions;
  }
  *res_idx = cur;
  // Measure the run of unavailable regions starting at *res_idx.
  while (cur <= end_idx && !is_available(cur)) {
    cur++;
  }
  num_regions = cur - *res_idx;

#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
    assert(!is_available(i), "just checking");
  }
  assert(cur == end_idx + 1 || num_regions == 0 || is_available(cur),
    "The region at the current position %u must be available or at the end", cur);
#endif
  return num_regions;
}
 148 
 149 uint HeterogeneousHeapRegionManager::expand_dram(uint num_regions, WorkGang* pretouch_workers) {
 150   return expand_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, pretouch_workers);
 151 }
 152 
 153 uint HeterogeneousHeapRegionManager::expand_nvdimm(uint num_regions, WorkGang* pretouch_workers) {
 154   return expand_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, pretouch_workers);
 155 }
 156 
// Follows same logic as expand_at() from HeapRegionManager: repeatedly find the next
// chunk of uncommitted regions in [start, end] and commit from it, until 'num_regions'
// regions have been made available or no uncommitted chunk remains. Returns the number
// of regions actually committed.
uint HeterogeneousHeapRegionManager::expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_gang) {

  uint so_far = 0;
  uint chunk_start = 0;
  uint num_last_found = 0;
  while (so_far < num_regions &&
         (num_last_found = find_unavailable_in_range(start, end, &chunk_start)) > 0) {
    uint to_commit = MIN2(num_regions - so_far, num_last_found);
    make_regions_available(chunk_start, to_commit, pretouch_gang);
    so_far += to_commit;
    // Resume the search past the chunk just committed. Skipping one extra index is
    // safe: whenever the loop continues, to_commit == num_last_found, and the region
    // at chunk_start + to_commit was already available (that is where the scan in
    // find_unavailable_in_range() stopped).
    start = chunk_start + to_commit + 1;
  }

  return so_far;
}
 173 
 174 // Shrink in the range of indexes which are reserved for dram.
 175 uint HeterogeneousHeapRegionManager::shrink_dram(uint num_regions, bool update_free_list) {
 176   return shrink_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, update_free_list);
 177 }
 178 
 179 // Shrink in the range of indexes which are reserved for nv-dimm.
 180 uint HeterogeneousHeapRegionManager::shrink_nvdimm(uint num_regions, bool update_free_list) {
 181   return shrink_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, update_free_list);
 182 }
 183 
// Find empty regions in given range, un-commit them and return the count.
// Works backwards from 'end' so the highest-indexed empty regions are released first.
uint HeterogeneousHeapRegionManager::shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list) {

  if (num_regions == 0) {
    return 0;
  }
  uint so_far = 0;
  uint idx_last_found = 0;
  uint num_last_found;
  while (so_far < num_regions &&
         (num_last_found = find_empty_in_range_reverse(start, end, &idx_last_found)) > 0) {
    uint to_uncommit = MIN2(num_regions - so_far, num_last_found);
    if(update_free_list) {
      // Pull the regions about to be uncommitted out of the free list first.
      _free_list.remove_starting_at(at(idx_last_found + num_last_found - to_uncommit), to_uncommit);
    }
    // Uncommit the highest 'to_uncommit' regions of the run that was found.
    uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
    so_far += to_uncommit;
    // Continue searching below the run just processed.
    end = idx_last_found;
  }
  return so_far;
}
 205 
 206 uint HeterogeneousHeapRegionManager::find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx) {
 207   guarantee(res_idx != NULL, "checking");
 208   guarantee(start_idx < max_length(), "checking");
 209   guarantee(end_idx < max_length(), "checking");
 210   if(start_idx > end_idx) {
 211     return 0;
 212   }
 213 
 214   uint num_regions_found = 0;
 215 
 216   jlong cur = end_idx;
 217   while (cur >= start_idx && !(is_available(cur) && at(cur)->is_empty())) {
 218     cur--;
 219   }
 220   if (cur == start_idx - 1) {
 221     return num_regions_found;
 222   }
 223   jlong old_cur = cur;
 224   // cur indexes the first empty region
 225   while (cur >= start_idx && is_available(cur) && at(cur)->is_empty()) {
 226     cur--;
 227   }
 228   *res_idx = cur + 1;
 229   num_regions_found = old_cur - cur;
 230 
 231 #ifdef ASSERT
 232   for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
 233     assert(at(i)->is_empty(), "just checking");
 234   }
 235 #endif
 236   return num_regions_found;
 237 }
 238 
// Removes a free region of the requested kind from the free list: an old region comes
// from nv-dimm, a young (non-old) region from dram. If no such region is committed,
// trades one committed region of the other memory type for one of the requested type,
// keeping the total commitment constant. Returns NULL if no region can be provided.
HeapRegion* HeterogeneousHeapRegionManager::allocate_free_region(bool is_old) {
  // old region is allocated from nv-dimm, non-old region from dram
  // assumption: dram regions take higher indexes
  assert(total_regions_committed() <= max_expandable_length(), "must be");
  // Free list is ordered by index: nv-dimm (low indexes) at the head, dram at the tail.
  bool from_head = is_old ? true : false;
  HeapRegion* hr = _free_list.remove_region(from_head);

  // If the free list handed back a region from the wrong memory type, return it and
  // fall through to the shrink/expand path below.
  if (hr != NULL && ( (is_old && !is_in_nvdimm(hr->hrm_index())) || (!is_old && !is_in_dram(hr->hrm_index())) ) ) {
    _free_list.add_ordered(hr);
    hr = NULL;
  }

#ifdef ASSERT
  uint total_committed_before = total_regions_committed();
#endif

  if (hr == NULL) {
    if (!is_old) {
      // Uncommit one nv-dimm region and commit one dram region in its place.
      uint ret = shrink_nvdimm(1);
      if (ret == 1) {
        ret = expand_dram(1, NULL);
        assert(ret == 1, "We should be able to commit one region");
        hr = _free_list.remove_region(from_head);
      }
    }
    else { /*is_old*/
      // Symmetric case: trade one dram region for one nv-dimm region.
      uint ret = shrink_dram(1);
      if (ret == 1) {
        ret = expand_nvdimm(1, NULL);
        assert(ret == 1, "We should be able to commit one region");
        hr = _free_list.remove_region(from_head);
      }
    }
  }
#ifdef ASSERT
  assert(total_committed_before == total_regions_committed(), "invariant not met");
  assert(total_regions_committed() <= max_expandable_length(), "post-condition");
#endif

  if (hr != NULL) {
    assert(hr->next() == NULL, "Single region should not have next");
    assert(is_available(hr->hrm_index()), "Must be committed");
  }
  assert(total_regions_committed() <= max_expandable_length(), "must be");
  return hr;
}
 285 
 286 uint HeterogeneousHeapRegionManager::find_contiguous_only_empty(size_t num) {
 287   return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, true);
 288 }
 289 
 290 uint HeterogeneousHeapRegionManager::find_contiguous_empty_or_unavailable(size_t num) {
 291   return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, false);
 292 }
 293 
// Searches [start, end] for a run of 'num' contiguous candidate regions: each must be
// committed and empty or, when 'empty_only' is false, may also be uncommitted. Returns
// the first index of such a run, or G1_NO_HRM_INDEX when none exists or committing the
// uncommitted members would exceed the maximum expandable length.
uint HeterogeneousHeapRegionManager::find_contiguous(size_t start, size_t end, size_t num, bool empty_only) {
  uint found = 0;
  size_t length_found = 0;
  uint cur = (uint)start;
  uint length_unavailable = 0;

  while (length_found < num && cur <= end) {
    HeapRegion* hr = _regions.get_by_index(cur);
    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
      // This region is a potential candidate for allocation into.
      if (!is_available(cur)) {
        length_unavailable++;
      }
      length_found++;
    }
    else {
      // This region is not a candidate. The next region is the next possible one.
      found = cur + 1;
      length_found = 0;
    }
    cur++;
  }

  if (length_found == num) {
    for (uint i = found; i < (found + num); i++) {
      HeapRegion* hr = _regions.get_by_index(i);
      // sanity check
      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
                "Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
                " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr));
    }
    if (!empty_only && length_unavailable > (max_expandable_length() - total_regions_committed())) {
      // if 'length_unavailable' number of regions will be made available, we will exceed max regions.
      return G1_NO_HRM_INDEX;
    }
    return found;
  }
  else {
    return G1_NO_HRM_INDEX;
  }
}
 335 
 336 uint HeterogeneousHeapRegionManager::find_highest_free(bool* expanded) {
 337   // Loop downwards from the highest dram region index, looking for an
 338   // entry which is either free or not yet committed.  If not yet
 339   // committed, expand_at that index.
 340   uint curr = end_index_of_dram();
 341   while (true) {
 342     HeapRegion *hr = _regions.get_by_index(curr);
 343     if (hr == NULL && !(total_regions_committed() < _max_regions)) {
 344       uint res = shrink_nvdimm(1);
 345       if (res == 1) {
 346         res = expand_in_range(curr, curr, 1, NULL);
 347         assert(res == 1, "We should be able to expand since shrink was successful");
 348         *expanded = true;
 349         return curr;
 350       }
 351     }
 352     else {
 353       if (hr->is_free()) {
 354         *expanded = false;
 355         return curr;
 356       }
 357     }
 358     if (curr == start_index_of_dram()) {
 359       return G1_NO_HRM_INDEX;
 360     }
 361     curr--;
 362   }
 363 }
 364 
 365 // We need to override this since region 0 which serves are dummy region in base class may not be available here.
 366 // This is a corner condition when either number of regions is small. When adaptive sizing is used, initial heap size
 367 // could be just one region.  This region is commited in dram to be used for young generation, leaving region 0 (which is in nvdimm)
 368 // unavailable.
 369 HeapRegion* HeterogeneousHeapRegionManager::get_dummy_region() {
 370   uint curr = 0;
 371 
 372   while (curr < _regions.length()) {
 373     if (is_available(curr)) {
 374       return new_heap_region(curr);
 375     }
 376     curr++;
 377   }
 378   assert(false, "We should always find a region available for dummy region");
 379   return NULL;
 380 }
 381 
 382 // First shrink in dram, then in nv-dimm.
 383 uint HeterogeneousHeapRegionManager::shrink_by(uint num_regions) {
 384   // This call is made at end of full collection. Before making this call the region sets are tore down (tear_down_region_sets()).
 385   // So shrink() calls below do not need to remove uncomitted regions from free list.
 386   uint ret = shrink_dram(num_regions, false /* update_free_list */);
 387   ret += shrink_nvdimm(num_regions - ret, false /* update_free_list */);
 388   return ret;
 389 }
 390 
// No heterogeneous-specific checks of its own; defer to the base class verification.
void HeterogeneousHeapRegionManager::verify() {
  HeapRegionManager::verify();
}
 394 
 395 uint HeterogeneousHeapRegionManager::free_list_dram_length() const {
 396   return _free_list.num_of_regions_in_range(start_index_of_dram(), end_index_of_dram());
 397 }
 398 
 399 uint HeterogeneousHeapRegionManager::free_list_nvdimm_length() const {
 400   return _free_list.num_of_regions_in_range(start_index_of_nvdimm(), end_index_of_nvdimm());
 401 }
 402 
 403 bool HeterogeneousHeapRegionManager::is_in_nvdimm(uint index) const {
 404   return index >= start_index_of_nvdimm() && index <= end_index_of_nvdimm();
 405 }
 406 
 407 bool HeterogeneousHeapRegionManager::is_in_dram(uint index) const {
 408   return index >= start_index_of_dram() && index <= end_index_of_dram();
 409 }
 410 
// We have to make sure full collection copies all surviving objects to NV-DIMM.
// We might not have enough regions in nvdimm_set, so we need to make more regions on NV-DIMM available for full collection.
// Note: by doing this we are breaking the invariant that total number of committed regions is equal to current heap size.
// After full collection ends, we will re-establish this invariant by freeing DRAM regions (prepare_for_full_collection_end()).
void HeterogeneousHeapRegionManager::prepare_for_full_collection_start() {
  // Remember the commit level so prepare_for_full_collection_end() can restore it.
  _total_commited_before_full_gc = total_regions_committed();
  // Over-commit nv-dimm by the current dram region count so everything can be copied over.
  expand_nvdimm(num_committed_dram(), NULL);
  remove_all_free_regions();
}
 420 
// We need to bring back the total committed regions to before full collection start.
// All regular regions (not pinned regions) in DRAM should be free.
// We shrink all free regions in DRAM and if needed from NV-DIMM (when there are pinned DRAM regions)
void HeterogeneousHeapRegionManager::prepare_for_full_collection_end() {
  // Surplus committed by prepare_for_full_collection_start().
  uint shrink_size = total_regions_committed() - _total_commited_before_full_gc;
  uint so_far = 0;
  uint idx_last_found = 0;
  uint num_last_found;
  // Scan the whole index space backwards (dram indexes are highest), uncommitting
  // empty runs until the surplus is gone. NOTE(review): no free-list update here -
  // presumably the region sets are still torn down at this point (as in shrink_by());
  // confirm against the caller.
  uint end = (uint)_regions.length() - 1;
  while (so_far < shrink_size &&
         (num_last_found = find_empty_in_range_reverse(0, end, &idx_last_found)) > 0) {
    uint to_uncommit = MIN2(shrink_size - so_far, num_last_found);
    uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
    so_far += to_uncommit;
    end = idx_last_found;
  }
  assert(so_far == shrink_size, "We should be able to shrink this many regions");
}
 439 
// Dram regions occupy the upper half of the index space: [_max_regions, 2 * _max_regions - 1].
uint HeterogeneousHeapRegionManager::start_index_of_dram() const { return _max_regions;}
 441 
// Last valid dram index (inclusive).
uint HeterogeneousHeapRegionManager::end_index_of_dram() const { return 2*_max_regions - 1; }
 443 
// Nv-dimm regions occupy the lower half of the index space: [0, _max_regions - 1].
uint HeterogeneousHeapRegionManager::start_index_of_nvdimm() const { return 0; }
 445 
// Last valid nv-dimm index (inclusive).
uint HeterogeneousHeapRegionManager::end_index_of_nvdimm() const { return _max_regions - 1; }