/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, SAP.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_MEMORY_METASPACE_OCCUPANCYMAP_HPP_
#define SHARE_MEMORY_METASPACE_OCCUPANCYMAP_HPP_

#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"


namespace metaspace {
namespace internals {

class Metachunk;

  39 // Helper for Occupancy Bitmap. A type trait to give an all-bits-are-one-unsigned constant.
  40 template <typename T> struct all_ones  { static const T value; };
  41 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
  42 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
  43 
  44 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
  45 // keeps information about
  46 // - where a chunk starts
  47 // - whether a chunk is in-use or free
  48 // A bit in this bitmap represents one range of memory in the smallest
  49 // chunk size (SpecializedChunk or ClassSpecializedChunk).
  50 class OccupancyMap : public CHeapObj<mtInternal> {
  51 
  52   // The address range this map covers.
  53   const MetaWord* const _reference_address;
  54   const size_t _word_size;
  55 
  56   // The word size of a specialized chunk, aka the number of words one
  57   // bit in this map represents.
  58   const size_t _smallest_chunk_word_size;
  59 
  60   // map data
  61   // Data are organized in two bit layers:
  62   // The first layer is the chunk-start-map. Here, a bit is set to mark
  63   // the corresponding region as the head of a chunk.
  64   // The second layer is the in-use-map. Here, a set bit indicates that
  65   // the corresponding belongs to a chunk which is in use.
  66   uint8_t* _map[2];
  67 
  68   enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
  69 
  70   // length, in bytes, of bitmap data
  71   size_t _map_size;
  72 
  73   // Returns true if bit at position pos at bit-layer layer is set.
  74   bool get_bit_at_position(unsigned pos, unsigned layer) const {
  75     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
  76     const unsigned byteoffset = pos / 8;
  77     assert(byteoffset < _map_size,
  78            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
  79     const unsigned mask = 1 << (pos % 8);
  80     return (_map[layer][byteoffset] & mask) > 0;
  81   }
  82 
  83   // Changes bit at position pos at bit-layer layer to value v.
  84   void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
  85     assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
  86     const unsigned byteoffset = pos / 8;
  87     assert(byteoffset < _map_size,
  88            "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
  89     const unsigned mask = 1 << (pos % 8);
  90     if (v) {
  91       _map[layer][byteoffset] |= mask;
  92     } else {
  93       _map[layer][byteoffset] &= ~mask;
  94     }
  95   }
  96 
  97   // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
  98   // pos is 32/64 aligned and num_bits is 32/64.
  99   // This is the typical case when coalescing to medium chunks, whose size is
 100   // 32 or 64 times the specialized chunk size (depending on class or non class
 101   // case), so they occupy 64 bits which should be 64bit aligned, because
 102   // chunks are chunk-size aligned.
 103   template <typename T>
 104   bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
 105     assert(_map_size > 0, "not initialized");
 106     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 107     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
 108     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
 109     const size_t byteoffset = pos / 8;
 110     assert(byteoffset <= (_map_size - sizeof(T)),
 111            "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 112     const T w = *(T*)(_map[layer] + byteoffset);
 113     return w > 0 ? true : false;
 114   }
 115 
 116   // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
 117   bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
 118     if (pos % 32 == 0 && num_bits == 32) {
 119       return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
 120     } else if (pos % 64 == 0 && num_bits == 64) {
 121       return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
 122     } else {
 123       for (unsigned n = 0; n < num_bits; n ++) {
 124         if (get_bit_at_position(pos + n, layer)) {
 125           return true;
 126         }
 127       }
 128     }
 129     return false;
 130   }
 131 
 132   // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
 133   bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
 134     assert(word_size % _smallest_chunk_word_size == 0,
 135         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 136     const unsigned pos = get_bitpos_for_address(p);
 137     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 138     return is_any_bit_set_in_region(pos, num_bits, layer);
 139   }
 140 
 141   // Optimized case of set_bits_of_region for 32/64bit aligned access:
 142   // pos is 32/64 aligned and num_bits is 32/64.
 143   // This is the typical case when coalescing to medium chunks, whose size
 144   // is 32 or 64 times the specialized chunk size (depending on class or non
 145   // class case), so they occupy 64 bits which should be 64bit aligned,
 146   // because chunks are chunk-size aligned.
 147   template <typename T>
 148   void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 149     assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
 150            (unsigned)(sizeof(T) * 8), pos);
 151     assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
 152            num_bits, (unsigned)(sizeof(T) * 8));
 153     const size_t byteoffset = pos / 8;
 154     assert(byteoffset <= (_map_size - sizeof(T)),
 155            "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
 156     T* const pw = (T*)(_map[layer] + byteoffset);
 157     *pw = v ? all_ones<T>::value : (T) 0;
 158   }
 159 
 160   // Set all bits in a region starting at pos to a value.
 161   void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
 162     assert(_map_size > 0, "not initialized");
 163     assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
 164     if (pos % 32 == 0 && num_bits == 32) {
 165       set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
 166     } else if (pos % 64 == 0 && num_bits == 64) {
 167       set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
 168     } else {
 169       for (unsigned n = 0; n < num_bits; n ++) {
 170         set_bit_at_position(pos + n, layer, v);
 171       }
 172     }
 173   }
 174 
 175   // Helper: sets all bits in a region [p, p+word_size).
 176   void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
 177     assert(word_size % _smallest_chunk_word_size == 0,
 178         "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
 179     const unsigned pos = get_bitpos_for_address(p);
 180     const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
 181     set_bits_of_region(pos, num_bits, layer, v);
 182   }
 183 
 184   // Helper: given an address, return the bit position representing that address.
 185   unsigned get_bitpos_for_address(const MetaWord* p) const {
 186     assert(_reference_address != NULL, "not initialized");
 187     assert(p >= _reference_address && p < _reference_address + _word_size,
 188            "Address %p out of range for occupancy map [%p..%p).",
 189             p, _reference_address, _reference_address + _word_size);
 190     assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
 191            "Address not aligned (%p).", p);
 192     const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
 193     assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
 194     return (unsigned) d;
 195   }
 196 
 197  public:
 198 
 199   OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size);
 200   ~OccupancyMap();
 201 
 202   // Returns true if at address x a chunk is starting.
 203   bool chunk_starts_at_address(MetaWord* p) const {
 204     const unsigned pos = get_bitpos_for_address(p);
 205     return get_bit_at_position(pos, layer_chunk_start_map);
 206   }
 207 
 208   void set_chunk_starts_at_address(MetaWord* p, bool v) {
 209     const unsigned pos = get_bitpos_for_address(p);
 210     set_bit_at_position(pos, layer_chunk_start_map, v);
 211   }
 212 
 213   // Removes all chunk-start-bits inside a region, typically as a
 214   // result of a chunk merge.
 215   void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
 216     set_bits_of_region(p, word_size, layer_chunk_start_map, false);
 217   }
 218 
 219   // Returns true if there are life (in use) chunks in the region limited
 220   // by [p, p+word_size).
 221   bool is_region_in_use(MetaWord* p, size_t word_size) const {
 222     return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
 223   }
 224 
 225   // Marks the region starting at p with the size word_size as in use
 226   // or free, depending on v.
 227   void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
 228     set_bits_of_region(p, word_size, layer_in_use_map, v);
 229   }
 230 
 231   // Verify occupancy map for the address range [from, to).
 232   // We need to tell it the address range, because the memory the
 233   // occupancy map is covering may not be fully comitted yet.
 234   DEBUG_ONLY(void verify(MetaWord* from, MetaWord* to);)
 235 
 236   // Verify that a given chunk is correctly accounted for in the bitmap.
 237   DEBUG_ONLY(void verify_for_chunk(Metachunk* chunk);)
 238 
 239 };

} // namespace internals
} // namespace metaspace

#endif /* SHARE_MEMORY_METASPACE_OCCUPANCYMAP_HPP_ */