/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_HEAPREGIONMANAGER_HPP
#define SHARE_VM_GC_G1_HEAPREGIONMANAGER_HPP

#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "services/memoryUsage.hpp"

class HeapRegion;
class HeapRegionClosure;
class HeapRegionClaimer;
class FreeRegionList;
class WorkGang;

class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
 protected:
  virtual HeapRegion* default_value() const { return NULL; }
};

// This class keeps track of the actual heap memory, auxiliary data
// and its metadata (i.e., HeapRegion instances) and the list of free regions.
//
// This allows maximum flexibility for deciding what to commit or uncommit given
// a request from outside.
//
// HeapRegions are kept in the _regions array in address order. A region's
// index in the array corresponds to its index in the heap (i.e., 0 is the
// region at the bottom of the heap, 1 is the one after it, etc.). Two
// regions that are consecutive in the array should also be adjacent in the
// address space (i.e., region(i).end() == region(i+1).bottom()).
//
// We create a HeapRegion when we commit the region's address space
// for the first time. When we uncommit the address space of a
// region we retain the HeapRegion to be able to re-use it in the
// future (in case we recommit it).
//
// We keep track of three lengths:
//
// * _num_committed (returned by length()) is the number of currently
//   committed regions. These may not be contiguous.
// * _allocated_heapregions_length (not exposed outside this class) is the
//   number of regions for which we have allocated HeapRegion instances,
//   i.e., one past the highest such region index.
// * max_length() returns the maximum number of regions the heap can have.
//
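// For illustration, the definitions above imply the relationship below (an
// inferred invariant noted here for clarity; this header does not assert it):
//
//   _num_committed <= _allocated_heapregions_length <= max_length()
//
// since every committed region has an allocated HeapRegion instance, and
// instances are only created for regions within the reserved heap.
//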

class HeapRegionManager: public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class HeapRegionClaimer;

  G1RegionToSpaceMapper* _prev_bitmap_mapper;
  G1RegionToSpaceMapper* _next_bitmap_mapper;
  G1RegionToSpaceMapper* _bot_mapper;
  G1RegionToSpaceMapper* _cardtable_mapper;
  G1RegionToSpaceMapper* _card_counts_mapper;

  // Each bit in this bitmap indicates that the corresponding region is available
  // for allocation.
  CHeapBitMap _available_map;

  // The number of regions committed in the heap.
  uint _num_committed;

  // Internal only. The index one past the highest heap region for which we have
  // allocated a HeapRegion instance.
  uint _allocated_heapregions_length;

  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
  HeapWord* heap_end() const { return _regions.end_address_mapped(); }

  // Pass down commit calls to the VirtualSpace.
  void commit_regions(uint index, size_t num_regions = 1, WorkGang* pretouch_gang = NULL);

  // Notify other data structures about change in the heap layout.
  void update_committed_space(HeapWord* old_end, HeapWord* new_end);

  // Find a contiguous set of empty or uncommitted regions of length num and return
  // the index of the first region or G1_NO_HRM_INDEX if the search was unsuccessful.
  // If only_empty is true, only empty regions are considered.
  // Searches from bottom to top of the heap, doing a first-fit.
  uint find_contiguous(size_t num, bool only_empty);
  // Finds the next sequence of unavailable regions starting from start_idx. Returns the
  // length of the sequence found. If this result is zero, no such sequence could be found,
  // otherwise res_idx indicates the start index of these regions.
  uint find_unavailable_from_idx(uint start_idx, uint* res_idx) const;
  // Finds the next sequence of empty regions starting from start_idx, going backwards in
  // the heap. Returns the length of the sequence found. If this value is zero, no
  // sequence could be found, otherwise res_idx contains the start index of this range.
  uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;

protected:
  G1HeapRegionTable _regions;
  G1RegionToSpaceMapper* _heap_mapper;
  FreeRegionList _free_list;
  void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
  void uncommit_regions(uint index, size_t num_regions = 1);
  // Allocate a new HeapRegion for the given index.
  HeapRegion* new_heap_region(uint hrm_index);
#ifdef ASSERT
public:
  bool is_free(HeapRegion* hr) const;
#endif
public:
  // Empty constructor, we'll initialize it with the initialize() method.
  HeapRegionManager();

  static HeapRegionManager* create_manager(G1CollectedHeap* heap, CollectorPolicy* policy);

  void initialize(G1RegionToSpaceMapper* heap_storage,
                  G1RegionToSpaceMapper* prev_bitmap,
                  G1RegionToSpaceMapper* next_bitmap,
                  G1RegionToSpaceMapper* bot,
                  G1RegionToSpaceMapper* cardtable,
                  G1RegionToSpaceMapper* card_counts);

  // Prepare heap regions before and after full collection.
  // Nothing to be done in this class.
  virtual void prepare_for_full_collection_start() {}
  virtual void prepare_for_full_collection_end() {}

  // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
  // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
  // the heap from the lowest address, this region (and its associated data
  // structures) is available and we do not need to check further.
  virtual HeapRegion* get_dummy_region() { return new_heap_region(0); }

  // Return the HeapRegion at the given index. Assume that the index
  // is valid.
  inline HeapRegion* at(uint index) const;

  // Return the HeapRegion at the given index, NULL if the index
  // is for an unavailable region.
  inline HeapRegion* at_or_null(uint index) const;

  // Returns whether the given region is available for allocation.
  bool is_available(uint region) const;

  // Return the next region (by index) that is part of the same
  // humongous object that hr is part of.
  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;

  // If addr is within the committed space return its corresponding
  // HeapRegion, otherwise return NULL.
  inline HeapRegion* addr_to_region(HeapWord* addr) const;

  // Insert the given region into the free region list.
  inline void insert_into_free_list(HeapRegion* hr);

  // Insert the given region list into the global free region list.
  void insert_list_into_free_list(FreeRegionList* list) {
    _free_list.add_ordered(list);
  }

  virtual HeapRegion* allocate_free_region(bool is_old) {
    HeapRegion* hr = _free_list.remove_region(is_old);

    if (hr != NULL) {
      assert(hr->next() == NULL, "Single region should not have next");
      assert(is_available(hr->hrm_index()), "Must be committed");
    }
    return hr;
  }

  inline void allocate_free_regions_starting_at(uint first, uint num_regions);

  // Remove all regions from the free list.
  void remove_all_free_regions() {
    _free_list.remove_all();
  }

  // Return the number of committed free regions in the heap.
  uint num_free_regions() const {
    return _free_list.length();
  }

  size_t total_free_bytes() const {
    return num_free_regions() * HeapRegion::GrainBytes;
  }

  // Return the number of available (uncommitted) regions.
  uint available() const { return max_length() - length(); }

  // Return the number of regions that have been committed in the heap.
  uint length() const { return _num_committed; }

  // Return the maximum number of regions in the heap.
  uint max_length() const { return (uint)_regions.length(); }

  // Return the maximum number of regions that the heap can expand to.
  virtual uint max_expandable_length() const { return (uint)_regions.length(); }

  MemoryUsage get_auxiliary_data_memory_usage() const;

  MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }

  // Expand the sequence to reflect that the heap has grown. Either create new
  // HeapRegions, or re-use existing ones. Returns the number of regions the
  // sequence was expanded by. If a HeapRegion allocation fails, the resulting
  // number of regions might be smaller than what's desired.
  virtual uint expand_by(uint num_regions, WorkGang* pretouch_workers);

  // Makes sure that the regions from start to start+num_regions-1 are available
  // for allocation. Returns the number of regions that were committed to achieve
  // this.
  virtual uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);

  // Find a contiguous set of empty regions of length num. Returns the start index of
  // that set, or G1_NO_HRM_INDEX.
  virtual uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
  // Find a contiguous set of empty or unavailable regions of length num. Returns the
  // start index of that set, or G1_NO_HRM_INDEX.
  virtual uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }

  HeapRegion* next_region_in_heap(const HeapRegion* r) const;

  // Find the highest free or uncommitted region in the reserved heap,
  // and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX.
  // Set the 'expanded' boolean true if a new region was committed.
  virtual uint find_highest_free(bool* expanded);

  // Allocate the regions that contain the address range specified, committing the
  // regions if necessary. Return false if any of the regions is already committed
  // and not free, and return the number of regions newly committed in commit_count.
  bool allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers);

  // Apply blk->do_heap_region() on all committed regions in address order,
  // terminating the iteration early if do_heap_region() returns true.
  void iterate(HeapRegionClosure* blk) const;

  void par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const;

  // Uncommit up to num_regions_to_remove regions that are completely free.
  // Return the actual number of uncommitted regions.
  virtual uint shrink_by(uint num_regions_to_remove);

  // Uncommit a number of regions starting at the specified index, which must be available,
  // empty, and free.
  void shrink_at(uint index, size_t num_regions);

  virtual void verify();

  // Do some sanity checking.
  void verify_optional() PRODUCT_RETURN;
};
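
// Illustrative use of iterate() with a hypothetical HeapRegionClosure subclass
// (a sketch for readers of this header, not part of the interface; the closure
// name and the 'hrm' variable are made up):
//
//   class CountCommittedRegionsClosure : public HeapRegionClosure {
//    public:
//     uint _count;
//     CountCommittedRegionsClosure() : _count(0) {}
//     virtual bool do_heap_region(HeapRegion* hr) {
//       _count++;
//       return false; // returning true would terminate the iteration early
//     }
//   };
//
//   CountCommittedRegionsClosure cl;
//   hrm->iterate(&cl); // visits committed regions in address order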

// The HeapRegionClaimer is used during parallel iteration over heap regions,
// allowing workers to claim heap regions, gaining exclusive rights to these regions.
class HeapRegionClaimer : public StackObj {
  uint           _n_workers;
  uint           _n_regions;
  volatile uint* _claims;

  static const uint Unclaimed = 0;
  static const uint Claimed   = 1;

 public:
  HeapRegionClaimer(uint n_workers);
  ~HeapRegionClaimer();

  inline uint n_regions() const {
    return _n_regions;
  }

  // Return a start offset given a worker id.
  uint offset_for_worker(uint worker_id) const;

  // Check if region has been claimed with this HRClaimer.
  bool is_region_claimed(uint region_index) const;

  // Claim the given region, returns true if successfully claimed.
  bool claim_region(uint region_index);
};
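
// Illustrative parallel-iteration sketch (hypothetical worker code, not part of
// this interface; 'hrm', 'cl', 'n_workers' and 'worker_id' are placeholders): a
// claimer is shared by all workers, and each worker starts from its own offset
// so work is spread across the heap.
//
//   HeapRegionClaimer claimer(n_workers);
//   // In worker 'worker_id', with 'cl' being some HeapRegionClosure:
//   hrm->par_iterate(&cl, &claimer, claimer.offset_for_worker(worker_id));
//
// Because claiming grants exclusive rights to a region, claim_region() succeeds
// for at most one worker per region, so each committed region is processed by a
// single worker even though all workers scan the heap.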
#endif // SHARE_VM_GC_G1_HEAPREGIONMANAGER_HPP