/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"

G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
                                             size_t used_size,
                                             size_t page_size,
                                             size_t region_granularity,
                                             size_t commit_factor,
                                             MemoryType type) :
  _listener(NULL),
  _storage(rs, used_size, page_size),
  _region_granularity(region_granularity),
  _commit_map(rs.size() * commit_factor / region_granularity, mtGC) {
  guarantee(is_power_of_2(page_size), "must be");
  guarantee(is_power_of_2(region_granularity), "must be");

  MemTracker::record_virtual_memory_type((address)rs.base(), type);
}

// G1RegionToSpaceMapper implementation where the region granularity is larger
// than or the same as the commit granularity.
// Basically, the space corresponding to one region spans several OS pages.
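// For example, with a 1M region granularity, a 4K page size and a commit
// factor of 1, each region spans 256 pages, so committing a region commits
// 256 pages at once. (Sizes here are illustrative, not defaults.)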
class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
 private:
  size_t _pages_per_region;

 public:
  G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
                                      size_t actual_size,
                                      size_t page_size,
                                      size_t alloc_granularity,
                                      size_t commit_factor,
                                      MemoryType type) :
    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
    _pages_per_region(alloc_granularity / (page_size * commit_factor)) {

    guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
  }

  virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
    size_t const start_page = (size_t)start_idx * _pages_per_region;
    bool zero_filled = _storage.commit(start_page, num_regions * _pages_per_region);
    if (AlwaysPreTouch) {
      _storage.pretouch(start_page, num_regions * _pages_per_region, pretouch_gang);
    }
    _commit_map.set_range(start_idx, start_idx + num_regions);
    fire_on_commit(start_idx, num_regions, zero_filled);
  }

  virtual void uncommit_regions(uint start_idx, size_t num_regions) {
    _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
    _commit_map.clear_range(start_idx, start_idx + num_regions);
  }
};

// G1RegionToSpaceMapper implementation where the region granularity is smaller
// than the commit granularity.
// Basically, the contents of one OS page span several regions.
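// For example, with a 1M region granularity, 2M large pages and a commit
// factor of 1, one page backs two regions, so commits of the underlying pages
// must be reference counted: a page may only be uncommitted once all regions
// it backs have been uncommitted. (Sizes here are illustrative, not defaults.)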
class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
 private:
  class CommitRefcountArray : public G1BiasedMappedArray<uint> {
   protected:
     virtual uint default_value() const { return 0; }
  };

  size_t _regions_per_page;

  CommitRefcountArray _refcounts;

  uintptr_t region_idx_to_page_idx(uint region) const {
    return region / _regions_per_page;
  }

 public:
  G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
                                       size_t actual_size,
                                       size_t page_size,
                                       size_t alloc_granularity,
                                       size_t commit_factor,
                                       MemoryType type) :
    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
    _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {

    guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_up(rs.size(), page_size)), page_size);
  }

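  // Commit a range of regions, committing each backing page only when its
  // refcount rises from zero. E.g. committing two regions that share one page
  // commits that page once and leaves it with a refcount of two.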
  virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
    size_t const NoPage = ~(size_t)0;

    size_t first_committed = NoPage;
    size_t num_committed = 0;

    bool all_zero_filled = true;

    for (uint i = start_idx; i < start_idx + num_regions; i++) {
      assert(!_commit_map.at(i), "Trying to commit storage at region %u that is already committed", i);
      size_t idx = region_idx_to_page_idx(i);
      uint old_refcount = _refcounts.get_by_index(idx);

      bool zero_filled = false;
      if (old_refcount == 0) {
        if (first_committed == NoPage) {
          first_committed = idx;
          num_committed = 1;
        } else {
          num_committed++;
        }
        zero_filled = _storage.commit(idx, 1);
      }
      all_zero_filled &= zero_filled;

      _refcounts.set_by_index(idx, old_refcount + 1);
      _commit_map.set_bit(i);
    }
    if (AlwaysPreTouch && num_committed > 0) {
      _storage.pretouch(first_committed, num_committed, pretouch_gang);
    }
    fire_on_commit(start_idx, num_regions, all_zero_filled);
  }

  virtual void uncommit_regions(uint start_idx, size_t num_regions) {
    for (uint i = start_idx; i < start_idx + num_regions; i++) {
      assert(_commit_map.at(i), "Trying to uncommit storage at region %u that is not committed", i);
      size_t idx = region_idx_to_page_idx(i);
      uint old_refcount = _refcounts.get_by_index(idx);
      assert(old_refcount > 0, "must be");
      if (old_refcount == 1) {
        _storage.uncommit(idx, 1);
      }
      _refcounts.set_by_index(idx, old_refcount - 1);
      _commit_map.clear_bit(i);
    }
  }
};

void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  if (_listener != NULL) {
    _listener->on_commit(start_idx, num_regions, zero_filled);
  }
}

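// Map the given reserved space to a file created at the path given by
// -XX:AllocateOldGenAt, so that this part of the heap is backed by NV-DIMM.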
void G1RegionToHeteroSpaceMapper::map_nvdimm_space(ReservedSpace rs) {
  assert(AllocateOldGenAt != NULL, "AllocateOldGenAt should be set");
  int backing_fd = os::create_file_for_heap(AllocateOldGenAt);
  if (backing_fd == -1) {
    vm_exit_during_initialization(
      err_msg("Could not create file for Old generation at location %s", AllocateOldGenAt));
  }
  // Commit this memory on the NV-DIMM device backing the file.
  char* ret = os::attempt_reserve_memory_at(rs.size(), rs.base(), backing_fd);
  //char* ret = os::replace_existing_mapping_with_file_mapping(rs.base(), rs.size(), backing_fd);
  if (ret != rs.base()) {
    if (ret != NULL) {
      os::unmap_memory(rs.base(), rs.size());
    }
    vm_exit_during_initialization(
      err_msg("Error in mapping Old Gen to given AllocateOldGenAt = %s", AllocateOldGenAt));
  }
}

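// 2-way heterogeneous mapper: reserve twice MaxHeapSize and map the first
// half to a file on NV-DIMM (where Old generation regions live) and the
// second half to regular DRAM. For example, with -Xmx8g the reservation is
// 16g, and region indices below 8g/region_size refer to NV-DIMM storage.
// (The example size is illustrative.)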
G1RegionToHeteroSpaceMapper::G1RegionToHeteroSpaceMapper(ReservedSpace rs,
                                                         size_t actual_size,
                                                         size_t page_size,
                                                         size_t alloc_granularity,
                                                         size_t commit_factor,
                                                         MemoryType type) :
  G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
  _num_committed_dram(0), _num_committed_nvdimm(0) {
  assert(actual_size == 2 * MaxHeapSize, "For a 2-way heterogeneous heap, the reserved space is twice MaxHeapSize");

  // Since we need to split the reserved space in half and map one half to a file on NV-DIMM,
  // we release the reserved memory first: on some OSes (e.g. Windows) a file mapping cannot
  // replace memory reserved with a regular mapping.
  os::release_memory(rs.base(), rs.size());

  /* Toggle HeteroHeap
  // We map the first part of size Xmx to DRAM.
  ReservedSpace rs_dram = rs.first_part(MaxHeapSize);
  // The second half of the reserved memory is mapped to NV-DIMM.
  ReservedSpace rs_nvdimm = rs.last_part(MaxHeapSize);*/
  // We map the first part of size Xmx to NV-DIMM.
  ReservedSpace rs_nvdimm = rs.first_part(MaxHeapSize);
  // The second half of the reserved memory is mapped to DRAM.
  ReservedSpace rs_dram = rs.last_part(MaxHeapSize);
  assert(rs_dram.size() == rs_nvdimm.size() && rs_nvdimm.size() == MaxHeapSize, "Both halves should be of size MaxHeapSize");

  // Reserve the DRAM memory.
  char* base = os::attempt_reserve_memory_at(rs_dram.size(), rs_dram.base());
  if (base != rs_dram.base()) {
    if (base != NULL) {
      os::release_memory(base, rs_dram.size());
    }
    vm_exit_during_initialization(err_msg("Error in allocating heap"));
  }

  // We reserve and commit the entire NV-DIMM space up front.
  map_nvdimm_space(rs_nvdimm);

  if (alloc_granularity >= (page_size * commit_factor)) {
    _dram_mapper = new G1RegionsLargerThanCommitSizeMapper(rs_dram, rs_dram.size(), page_size, alloc_granularity, commit_factor, type);
  } else {
    _dram_mapper = new G1RegionsSmallerThanCommitSizeMapper(rs_dram, rs_dram.size(), page_size, alloc_granularity, commit_factor, type);
  }

  /* Toggle HeteroHeap
  _start_index_of_nvdimm = (uint)(rs_dram.size() / alloc_granularity);
  _start_index_of_dram = 0; */
  _start_index_of_nvdimm = 0;
  _start_index_of_dram = (uint)(rs_nvdimm.size() / alloc_granularity);
}

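// Commit a range of regions that may span the NV-DIMM/DRAM boundary. Only the
// DRAM part needs actual committing; NV-DIMM regions were committed up front
// in map_nvdimm_space(). For example (illustrative sizes), with
// _start_index_of_dram == 8192, committing regions [8000, 8400) gives
// num_dram == 208 and num_nvdimm == 192, and the DRAM mapper commits its
// regions [0, 208).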
void G1RegionToHeteroSpaceMapper::commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
  uint end_idx = (start_idx + (uint)num_regions - 1);

  /* Toggle HeteroHeap
  uint num_nvdimm = end_idx >= _start_index_of_nvdimm ? MIN2((end_idx - _start_index_of_nvdimm + 1), (uint)num_regions) : 0;
  uint num_dram = (uint)num_regions - num_nvdimm;*/
  uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
  uint num_nvdimm = (uint)num_regions - num_dram;

  if (num_nvdimm > 0) {
    // We do not need to commit NV-DIMM regions; they are committed at initialization.
    _num_committed_nvdimm += num_nvdimm;
  }
  if (num_dram > 0) {
    /* Toggle HeteroHeap
    _dram_mapper->commit_regions(start_idx, num_dram, pretouch_gang); */
    _dram_mapper->commit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram, pretouch_gang);
    _num_committed_dram += num_dram;
  }
}

void G1RegionToHeteroSpaceMapper::uncommit_regions(uint start_idx, size_t num_regions) {
  uint end_idx = (start_idx + (uint)num_regions - 1);
  /* Toggle HeteroHeap
  uint num_nvdimm = end_idx >= _start_index_of_nvdimm ? MIN2((end_idx - _start_index_of_nvdimm + 1), (uint)num_regions) : 0;
  uint num_dram = (uint)num_regions - num_nvdimm;*/
  uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
  uint num_nvdimm = (uint)num_regions - num_dram;

  if (num_nvdimm > 0) {
    // We do not uncommit memory for NV-DIMM regions.
    _num_committed_nvdimm -= num_nvdimm;
  }

  if (num_dram > 0) {
    _dram_mapper->uncommit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram);
    _num_committed_dram -= num_dram;
  }
}

uint G1RegionToHeteroSpaceMapper::num_committed_dram() {
  return _num_committed_dram;
}

uint G1RegionToHeteroSpaceMapper::num_committed_nvdimm() {
  return _num_committed_nvdimm;
}

G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_heap_mapper(ReservedSpace rs,
                                                                 size_t actual_size,
                                                                 size_t page_size,
                                                                 size_t region_granularity,
                                                                 size_t commit_factor,
                                                                 MemoryType type) {
  if (AllocateOldGenAt != NULL) {
    return new G1RegionToHeteroSpaceMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
  } else {
    return create_mapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
  }
}

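// Choose a mapper implementation based on how the region granularity compares
// to the commit granularity (page_size * commit_factor). For example, 1M
// regions on 4K pages use the "larger than" mapper, while 1M regions on 2M
// large pages use the refcounting "smaller than" mapper. (Illustrative sizes.)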
G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
                                                            size_t actual_size,
                                                            size_t page_size,
                                                            size_t region_granularity,
                                                            size_t commit_factor,
                                                            MemoryType type) {
  if (region_granularity >= (page_size * commit_factor)) {
    return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
  } else {
    return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
  }
}