src/hotspot/share/memory/metaspace.cpp

rev 49010 : [mq]: metaspace-coalesc-patch
rev 49011 : [mq]: metaspace-coal-2

@@ -54,62 +54,100 @@
 
 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
 
 // Set this constant to enable slow integrity checking of the free chunk lists
-const bool metaspace_slow_verify = false;
+const bool metaspace_slow_verify = DEBUG_ONLY(true) NOT_DEBUG(false);
+
+// Helper function that performs a number of integrity checks on a chunk.
+DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
+
+// Given a Metachunk, update its in-use information (both in the
+// chunk and the occupancy map).
+static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
 
 size_t const allocation_from_dictionary_limit = 4 * K;
 
 MetaWord* last_allocated = 0;
 
 size_t Metaspace::_compressed_class_space_size;
 const MetaspaceTracer* Metaspace::_tracer = NULL;
 
 DEBUG_ONLY(bool Metaspace::_frozen = false;)
 
-// Used in declarations in SpaceManager and ChunkManager
-enum ChunkIndex {
-  ZeroIndex = 0,
-  SpecializedIndex = ZeroIndex,
-  SmallIndex = SpecializedIndex + 1,
-  MediumIndex = SmallIndex + 1,
-  HumongousIndex = MediumIndex + 1,
-  NumberOfFreeLists = 3,
-  NumberOfInUseLists = 4
-};
-
-// Helper, returns a descriptive name for the given index.
-static const char* chunk_size_name(ChunkIndex index) {
-  switch (index) {
-    case SpecializedIndex:
-      return "specialized";
-    case SmallIndex:
-      return "small";
-    case MediumIndex:
-      return "medium";
-    case HumongousIndex:
-      return "humongous";
-    default:
-      return "Invalid index";
-  }
-}
-
 enum ChunkSizes {    // in words.
   ClassSpecializedChunk = 128,
   SpecializedChunk = 128,
   ClassSmallChunk = 256,
   SmallChunk = 512,
   ClassMediumChunk = 4 * K,
   MediumChunk = 8 * K
 };
 
+// Returns size of this chunk type.
+size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
+  assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
+  size_t size = 0;
+  if (is_class) {
+    switch(chunktype) {
+      case SpecializedIndex: size = ClassSpecializedChunk; break;
+      case SmallIndex: size = ClassSmallChunk; break;
+      case MediumIndex: size = ClassMediumChunk; break;
+      default:
+        ShouldNotReachHere();
+    }
+  } else {
+    switch(chunktype) {
+      case SpecializedIndex: size = SpecializedChunk; break;
+      case SmallIndex: size = SmallChunk; break;
+      case MediumIndex: size = MediumChunk; break;
+      default:
+        ShouldNotReachHere();
+    }
+  }
+  return size;
+}
+
+ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
+  if (is_class) {
+    if (size == ClassSpecializedChunk) {
+      return SpecializedIndex;
+    } else if (size == ClassSmallChunk) {
+      return SmallIndex;
+    } else if (size == ClassMediumChunk) {
+      return MediumIndex;
+    } else if (size > ClassMediumChunk) {
+      assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
+      return HumongousIndex;
+    }
+  } else {
+    if (size == SpecializedChunk) {
+      return SpecializedIndex;
+    } else if (size == SmallChunk) {
+      return SmallIndex;
+    } else if (size == MediumChunk) {
+      return MediumIndex;
+    } else if (size > MediumChunk) {
+      assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
+      return HumongousIndex;
+    }
+  }
+  ShouldNotReachHere();
+  return (ChunkIndex)-1;
+}
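
For illustration, a minimal standalone sketch (simplified, hypothetical names; non-class word sizes assumed) of the size-to-type round trip implemented by the two functions above:

    #include <cassert>
    #include <cstddef>

    enum ToyChunkIndex { SpecIdx, SmallIdx, MedIdx, HumongousIdx };

    // Mirrors get_size_for_nonhumongous_chunktype() for the non-class case.
    static size_t size_for(ToyChunkIndex t) {
      switch (t) {
        case SpecIdx:  return 128;      // SpecializedChunk
        case SmallIdx: return 512;      // SmallChunk
        case MedIdx:   return 8 * 1024; // MediumChunk
        default:       assert(false); return 0;
      }
    }

    // Mirrors get_chunk_type_by_size() for the non-class case.
    static ToyChunkIndex type_for(size_t words) {
      if (words == 128)      return SpecIdx;
      if (words == 512)      return SmallIdx;
      if (words == 8 * 1024) return MedIdx;
      // Humongous sizes are larger than medium and spec-aligned.
      assert(words > 8 * 1024 && words % 128 == 0);
      return HumongousIdx;
    }

    int main() {
      assert(type_for(size_for(SmallIdx)) == SmallIdx); // round trip
      assert(type_for(16 * 1024) == HumongousIdx);      // oversized request
      return 0;
    }
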
+
+
 static ChunkIndex next_chunk_index(ChunkIndex i) {
   assert(i < NumberOfInUseLists, "Out of bound");
   return (ChunkIndex) (i+1);
 }
 
+static ChunkIndex prev_chunk_index(ChunkIndex i) {
+  assert(i > ZeroIndex, "Out of bound");
+  return (ChunkIndex) (i-1);
+}
+
 static const char* scale_unit(size_t scale) {
   switch(scale) {
     case 1: return "BYTES";
     case K: return "KB";
     case M: return "MB";

@@ -134,10 +172,13 @@
   //   SpecializedChunk
   //   SmallChunk
   //   MediumChunk
   ChunkList _free_chunks[NumberOfFreeLists];
 
+  // Whether or not this is the ChunkManager for the class space.
+  const bool _is_class;
+
   // Return non-humongous chunk list by its index.
   ChunkList* free_chunks(ChunkIndex index);
 
   // Returns non-humongous chunk list for the given chunk word size.
   ChunkList* find_free_chunks_list(size_t word_size);

@@ -176,10 +217,26 @@
       locked_verify_free_chunks_count();
     }
   }
   void verify_free_chunks_count();
 
+  // Given a pointer to a chunk, attempts to merge it with neighboring
+  // free chunks to form a bigger chunk. Returns true if successful.
+  bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
+
+  // Helper for chunk coalescing:
+  //  Given an address range with 1-n chunks which are all supposed to be
+  //  free and hence currently managed by this ChunkManager, remove them
+  //  from this ChunkManager and mark them as invalid.
+  // - This does not correct the occupancy map.
+  // - This does not adjust the counters in ChunkManager.
+  // - This does not adjust the container count in the containing VirtualSpaceNode.
+  // Returns the number of chunks removed.
+  int remove_chunks_in_area(MetaWord* p, size_t word_size);
+
+ public:
+
   struct ChunkManagerStatistics {
     size_t num_by_type[NumberOfFreeLists];
     size_t single_size_by_type[NumberOfFreeLists];
     size_t total_size_by_type[NumberOfFreeLists];
     size_t num_humongous_chunks;

@@ -188,29 +245,35 @@
 
   void locked_get_statistics(ChunkManagerStatistics* stat) const;
   void get_statistics(ChunkManagerStatistics* stat) const;
   static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
 
- public:
 
-  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
-      : _free_chunks_total(0), _free_chunks_count(0) {
-    _free_chunks[SpecializedIndex].set_size(specialized_size);
-    _free_chunks[SmallIndex].set_size(small_size);
-    _free_chunks[MediumIndex].set_size(medium_size);
+  ChunkManager(bool is_class)
+      : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
+    _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
+    _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
+    _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
   }
 
-  // add or delete (return) a chunk to the global freelist.
+  // Add or delete (return) a chunk to the global freelist.
   Metachunk* chunk_freelist_allocate(size_t word_size);
 
   // Map a size to a list index assuming that there are lists
   // for special, small, medium, and humongous chunks.
   ChunkIndex list_index(size_t size);
 
   // Map a given index to the chunk size.
   size_t size_by_index(ChunkIndex index) const;
 
+  bool is_class() const { return _is_class; }
+
+  // Convenience accessors.
+  size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
+  size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
+  size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
+
   // Take a chunk from the ChunkManager. The chunk is expected to be in
   // the chunk manager (the freelist if non-humongous, the dictionary if
   // humongous).
   void remove_chunk(Metachunk* chunk);
 

@@ -389,45 +452,346 @@
 
   static size_t min_dictionary_size()   { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
   void print_on(outputStream* st) const;
 };
 
+// Helper for the occupancy bitmap: a type trait yielding an all-bits-set constant for unsigned integer types.
+template <typename T> struct all_ones  { static const T value; };
+template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
+template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
+
+// The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
+// keeps information about
+// - where a chunk starts
+// - whether a chunk is in-use or free
+// Each bit in this bitmap represents one range of memory the size of the
+// smallest chunk type (SpecializedChunk or ClassSpecializedChunk).
+class OccupancyMap : public CHeapObj<mtInternal> {
+
+  // The address range this map covers.
+  const MetaWord* const _reference_address;
+  const size_t _word_size;
+
+  // The word size of a specialized chunk, aka the number of words one
+  // bit in this map represents.
+  const size_t _smallest_chunk_word_size;
+
+  // Map data.
+  // The data is organized in two bit layers:
+  // The first layer is the chunk-start-map. Here, a bit is set to mark
+  // the corresponding region as the head of a chunk.
+  // The second layer is the in-use-map. Here, a set bit indicates that
+  // the corresponding region belongs to a chunk which is in use.
+  uint8_t* _map[2];
+
+  enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
+
+  // length, in bytes, of bitmap data
+  size_t _map_size;
+
+  // Returns true if bit at position pos at bit-layer layer is set.
+  bool get_bit_at_position(unsigned pos, unsigned layer) const {
+    assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
+    const unsigned byteoffset = pos / 8;
+    assert(byteoffset < _map_size,
+           "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
+    const unsigned mask = 1 << (pos % 8);
+    return (_map[layer][byteoffset] & mask) > 0;
+  }
+
+  // Changes bit at position pos at bit-layer layer to value v.
+  void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
+    assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
+    const unsigned byteoffset = pos / 8;
+    assert(byteoffset < _map_size,
+           "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
+    const unsigned mask = 1 << (pos % 8);
+    if (v) {
+      _map[layer][byteoffset] |= mask;
+    } else {
+      _map[layer][byteoffset] &= ~mask;
+    }
+  }
+
+  // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
+  // pos is 32/64 aligned and num_bits is 32/64.
+  // This is the typical case when coalescing to medium chunks, whose size is
+  // 32 or 64 times the specialized chunk size (depending on class or non-class
+  // case), so they occupy 32 or 64 bits, which should be 32/64-bit aligned,
+  // because chunks are chunk-size aligned.
+  template <typename T>
+  bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
+    assert(_map_size > 0, "not initialized");
+    assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
+    assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
+    assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
+    const size_t byteoffset = pos / 8;
+    assert(byteoffset <= (_map_size - sizeof(T)),
+           "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
+    const T w = *(T*)(_map[layer] + byteoffset);
+    return w > 0 ? true : false;
+  }
+
+  // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
+  bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
+    if (pos % 32 == 0 && num_bits == 32) {
+      return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
+    } else if (pos % 64 == 0 && num_bits == 64) {
+      return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
+    } else {
+      for (unsigned n = 0; n < num_bits; n ++) {
+        if (get_bit_at_position(pos + n, layer)) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
+  bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
+    assert(word_size % _smallest_chunk_word_size == 0,
+        "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
+    const unsigned pos = get_bitpos_for_address(p);
+    const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
+    return is_any_bit_set_in_region(pos, num_bits, layer);
+  }
+
+  // Optimized case of set_bits_of_region for 32/64bit aligned access:
+  // pos is 32/64 aligned and num_bits is 32/64.
+  // This is the typical case when coalescing to medium chunks, whose size
+  // is 32 or 64 times the specialized chunk size (depending on class or non-
+  // class case), so they occupy 32 or 64 bits, which should be 32/64-bit
+  // aligned, because chunks are chunk-size aligned.
+  template <typename T>
+  void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
+    assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
+           (unsigned)(sizeof(T) * 8), pos);
+    assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
+           num_bits, (unsigned)(sizeof(T) * 8));
+    const size_t byteoffset = pos / 8;
+    assert(byteoffset <= (_map_size - sizeof(T)),
+           "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
+    T* const pw = (T*)(_map[layer] + byteoffset);
+    *pw = v ? all_ones<T>::value : (T) 0;
+  }
+
+  // Set all bits in a region starting at pos to a value.
+  void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
+    assert(_map_size > 0, "not initialized");
+    assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
+    if (pos % 32 == 0 && num_bits == 32) {
+      set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
+    } else if (pos % 64 == 0 && num_bits == 64) {
+      set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
+    } else {
+      for (unsigned n = 0; n < num_bits; n ++) {
+        set_bit_at_position(pos + n, layer, v);
+      }
+    }
+  }
+
+  // Helper: sets all bits in a region [p, p+word_size).
+  void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
+    assert(word_size % _smallest_chunk_word_size == 0,
+        "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
+    const unsigned pos = get_bitpos_for_address(p);
+    const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
+    set_bits_of_region(pos, num_bits, layer, v);
+  }
+
+  // Helper: given an address, return the bit position representing that address.
+  unsigned get_bitpos_for_address(const MetaWord* p) const {
+    assert(_reference_address != NULL, "not initialized");
+    assert(p >= _reference_address && p < _reference_address + _word_size,
+           "Address %p out of range for occupancy map [%p..%p).",
+            p, _reference_address, _reference_address + _word_size);
+    assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
+           "Address not aligned (%p).", p);
+    const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
+    assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
+    return (unsigned) d;
+  }
+
+ public:
+
+  OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
+    _reference_address(reference_address), _word_size(word_size),
+    _smallest_chunk_word_size(smallest_chunk_word_size) {
+    assert(reference_address != NULL, "invalid reference address");
+    assert(is_aligned(reference_address, smallest_chunk_word_size),
+           "Reference address not aligned to smallest chunk size.");
+    assert(is_aligned(word_size, smallest_chunk_word_size),
+           "Word_size shall be a multiple of the smallest chunk size.");
+    // Calculate the bitmap size: one bit per area of smallest_chunk_word_size words.
+    size_t num_bits = word_size / smallest_chunk_word_size;
+    _map_size = (num_bits + 7) / 8;
+    assert(_map_size * 8 >= num_bits, "sanity");
+    _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
+    _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
+    assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
+    memset(_map[1], 0, _map_size);
+    memset(_map[0], 0, _map_size);
+    // Sanity test: the first and last possible chunk start addresses in
+    // the covered range shall map to the first and last bit in the bitmap.
+    assert(get_bitpos_for_address(reference_address) == 0,
+      "First chunk address in range must map to first bit in bitmap.");
+    assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
+      "Last chunk address in range must map to last bit in bitmap.");
+  }
+
+  ~OccupancyMap() {
+    os::free(_map[0]);
+    os::free(_map[1]);
+  }
+
+  // Returns true if a chunk starts at address p.
+  bool chunk_starts_at_address(MetaWord* p) const {
+    const unsigned pos = get_bitpos_for_address(p);
+    return get_bit_at_position(pos, layer_chunk_start_map);
+  }
+
+  void set_chunk_starts_at_address(MetaWord* p, bool v) {
+    const unsigned pos = get_bitpos_for_address(p);
+    set_bit_at_position(pos, layer_chunk_start_map, v);
+  }
+
+  // Removes all chunk-start bits inside a region, typically as a
+  // result of chunks having been merged.
+  void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
+    set_bits_of_region(p, word_size, layer_chunk_start_map, false);
+  }
+
+  // Returns true if there are live (in use) chunks in the region limited
+  // by [p, p+word_size).
+  bool is_region_in_use(MetaWord* p, size_t word_size) const {
+    return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
+  }
+
+  // Marks the region starting at p with the size word_size as in use
+  // or free, depending on v.
+  void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
+    set_bits_of_region(p, word_size, layer_in_use_map, v);
+  }
+
+#ifdef ASSERT
+  // Verify occupancy map for the address range [from, to).
+  // We need to tell it the address range, because the memory the
+  // occupancy map is covering may not be fully committed yet.
+  void verify(MetaWord* from, MetaWord* to) {
+    Metachunk* chunk = NULL;
+    int nth_bit_for_chunk = 0;
+    MetaWord* chunk_end = NULL;
+    for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
+      const unsigned pos = get_bitpos_for_address(p);
+      // Check the chunk-starts-info:
+      if (get_bit_at_position(pos, layer_chunk_start_map)) {
+        // Chunk start marked in bitmap.
+        chunk = (Metachunk*) p;
+        if (chunk_end != NULL) {
+          assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
+                 "the next chunk to start at %p).", p, chunk_end);
+        }
+        assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
+        if (chunk->get_chunk_type() != HumongousIndex) {
+          guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
+        }
+        chunk_end = p + chunk->word_size();
+        nth_bit_for_chunk = 0;
+        assert(chunk_end <= to, "Chunk end overlaps test address range.");
+      } else {
+        // No chunk start marked in bitmap.
+        assert(chunk != NULL, "Chunk should start at start of address range.");
+        assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
+        nth_bit_for_chunk ++;
+      }
+      // Check the in-use-info:
+      const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
+      if (in_use_bit) {
+        assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
+               chunk, nth_bit_for_chunk);
+      } else {
+        assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
+               chunk, nth_bit_for_chunk);
+      }
+    }
+  }
+
+  // Verify that a given chunk is correctly accounted for in the bitmap.
+  void verify_for_chunk(Metachunk* chunk) {
+    assert(chunk_starts_at_address((MetaWord*) chunk),
+           "No chunk start marked in map for chunk %p.", chunk);
+    // For chunks larger than the minimal chunk size, no other chunk
+    // must start in its area.
+    if (chunk->word_size() > _smallest_chunk_word_size) {
+      assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
+                                       chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
+             "No chunk must start within another chunk.");
+    }
+    if (!chunk->is_tagged_free()) {
+      assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
+             "Chunk %p is in use but marked as free in map (%d %d).",
+             chunk, chunk->get_chunk_type(), chunk->get_origin());
+    } else {
+      assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
+             "Chunk %p is free but marked as in-use in map (%d %d).",
+             chunk, chunk->get_chunk_type(), chunk->get_origin());
+    }
+  }
+
+#endif // ASSERT
+
+};
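
The class above is the heart of the occupancy bookkeeping. A minimal standalone sketch (simplified, hypothetical names) of the underlying two-layer bitmap idea: one bit per smallest-chunk-sized area, layer 0 marking chunk starts, layer 1 marking in-use areas, sized as in the constructor above (map size = (num_bits + 7) / 8 bytes per layer):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct TwoLayerBitmap {
      size_t num_bits;
      std::vector<uint8_t> layer[2]; // [0] = chunk starts, [1] = in use

      explicit TwoLayerBitmap(size_t bits) : num_bits(bits) {
        const size_t bytes = (bits + 7) / 8; // same sizing as OccupancyMap
        layer[0].assign(bytes, 0);
        layer[1].assign(bytes, 0);
      }
      void set(unsigned pos, unsigned l, bool v) {
        assert(pos < num_bits && l < 2);
        const uint8_t mask = (uint8_t)(1u << (pos % 8));
        if (v) { layer[l][pos / 8] |= mask; } else { layer[l][pos / 8] &= (uint8_t)~mask; }
      }
      bool get(unsigned pos, unsigned l) const {
        assert(pos < num_bits && l < 2);
        return (layer[l][pos / 8] & (1u << (pos % 8))) != 0;
      }
    };

    int main() {
      TwoLayerBitmap map(64); // covers 64 smallest-chunk-sized areas
      map.set(0, 0, true);    // a chunk starts at area 0 ...
      for (unsigned i = 0; i < 4; i++) { map.set(i, 1, true); } // ... covering areas 0..3, in use
      assert(map.get(0, 0) && !map.get(1, 0)); // exactly one chunk start
      assert(map.get(3, 1) && !map.get(4, 1)); // in-use region ends at area 4
      return 0;
    }
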
+
 // A VirtualSpaceList node.
 class VirtualSpaceNode : public CHeapObj<mtClass> {
   friend class VirtualSpaceList;
 
   // Link to next VirtualSpaceNode
   VirtualSpaceNode* _next;
 
+  // Whether this node belongs to the class space or to non-class metaspace.
+  const bool _is_class;
+
   // total in the VirtualSpace
   MemRegion _reserved;
   ReservedSpace _rs;
   VirtualSpace _virtual_space;
   MetaWord* _top;
   // count of chunks contained in this VirtualSpace
   uintx _container_count;
 
+  OccupancyMap* _occupancy_map;
+
   // Convenience functions to access the _virtual_space
   char* low()  const { return virtual_space()->low(); }
   char* high() const { return virtual_space()->high(); }
 
   // The first Metachunk will be allocated at the bottom of the
   // VirtualSpace
   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 
   // Committed but unused space in the virtual space
   size_t free_words_in_vs() const;
+
+  // True if this node belongs to class metaspace.
+  bool is_class() const { return _is_class; }
+
  public:
 
-  VirtualSpaceNode(size_t byte_size);
-  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
+  VirtualSpaceNode(bool is_class, size_t byte_size);
+  VirtualSpaceNode(bool is_class, ReservedSpace rs) :
+    _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
   ~VirtualSpaceNode();
 
   // Convenience functions for logical bottom and end
   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 
+  const OccupancyMap* occupancy_map() const { return _occupancy_map; }
+  OccupancyMap* occupancy_map() { return _occupancy_map; }
+
   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
 
   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 

@@ -491,10 +855,14 @@
   void mangle();
 #endif
 
   void print_on(outputStream* st) const;
   void print_map(outputStream* st, bool is_class) const;
+
+  // Verify all chunks in this node.
+  void verify();
+
 };
 
 #define assert_is_aligned(value, alignment)                  \
   assert(is_aligned((value), (alignment)),                   \
          SIZE_FORMAT_HEX " is not aligned to "               \

@@ -513,11 +881,12 @@
 
   return false;
 }
 
   // byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
+VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
+  _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
   assert_is_aligned(bytes, Metaspace::reserve_alignment());
   bool large_pages = should_commit_large_pages_when_reserving(bytes);
   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 
   if (_rs.is_reserved()) {

@@ -529,118 +898,121 @@
     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
   }
 }
 
 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
+  DEBUG_ONLY(this->verify();)
   Metachunk* chunk = first_chunk();
   Metachunk* invalid_chunk = (Metachunk*) top();
   while (chunk < invalid_chunk ) {
     assert(chunk->is_tagged_free(), "Should be tagged free");
     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
     chunk_manager->remove_chunk(chunk);
+    DEBUG_ONLY(chunk->remove_sentinel();)
     assert(chunk->next() == NULL &&
            chunk->prev() == NULL,
            "Was not removed from its list");
     chunk = (Metachunk*) next;
   }
 }
 
 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
 
-  // Format:
-  // <ptr>
-  // <ptr>  . .. .               .  ..
-  //        SSxSSMMMMMMMMMMMMMMMMsssXX
-  //        112114444444444444444
-  // <ptr>  . .. .               .  ..
-  //        SSxSSMMMMMMMMMMMMMMMMsssXX
-  //        112114444444444444444
-
   if (bottom() == top()) {
     return;
   }
 
-  // First line: dividers for every med-chunk-sized interval
-  // Second line: a dot for the start of a chunk
-  // Third line: a letter per chunk type (x,s,m,h), uppercase if in use.
-
   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 
   int line_len = 100;
   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
   line_len = (int)(section_len / spec_chunk_size);
 
-  char* line1 = (char*)os::malloc(line_len, mtInternal);
-  char* line2 = (char*)os::malloc(line_len, mtInternal);
-  char* line3 = (char*)os::malloc(line_len, mtInternal);
+#ifdef ASSERT
+#define NUM_LINES 4
+#else
+#define NUM_LINES 2
+#endif
+
+  char* lines[NUM_LINES];
+  for (int i = 0; i < NUM_LINES; i ++) {
+    lines[i] = (char*)os::malloc(line_len, mtInternal);
+  }
   int pos = 0;
   const MetaWord* p = bottom();
   const Metachunk* chunk = (const Metachunk*)p;
   const MetaWord* chunk_end = p + chunk->word_size();
   while (p < top()) {
     if (pos == line_len) {
       pos = 0;
-      st->fill_to(22);
-      st->print_raw(line1, line_len);
-      st->cr();
-      st->fill_to(22);
-      st->print_raw(line2, line_len);
-      st->cr();
+      for (int i = 0; i < NUM_LINES; i ++) {
+        st->fill_to(22);
+        st->print_raw(lines[i], line_len);
+        st->cr();
+      }
     }
     if (pos == 0) {
       st->print(PTR_FORMAT ":", p2i(p));
     }
     if (p == chunk_end) {
       chunk = (Metachunk*)p;
       chunk_end = p + chunk->word_size();
     }
-    if (p == (const MetaWord*)chunk) {
-      // chunk starts.
-      line1[pos] = '.';
-    } else {
-      line1[pos] = ' ';
-    }
+    // Line 1: chunk starting points (a dot if that area is a chunk start).
+    lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
+
     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
     // chunk is in use.
     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
     if (chunk->word_size() == spec_chunk_size) {
-      line2[pos] = chunk_is_free ? 'x' : 'X';
+      lines[1][pos] = chunk_is_free ? 'x' : 'X';
     } else if (chunk->word_size() == small_chunk_size) {
-      line2[pos] = chunk_is_free ? 's' : 'S';
+      lines[1][pos] = chunk_is_free ? 's' : 'S';
     } else if (chunk->word_size() == med_chunk_size) {
-      line2[pos] = chunk_is_free ? 'm' : 'M';
+      lines[1][pos] = chunk_is_free ? 'm' : 'M';
    } else if (chunk->word_size() > med_chunk_size) {
-      line2[pos] = chunk_is_free ? 'h' : 'H';
+      lines[1][pos] = chunk_is_free ? 'h' : 'H';
     } else {
       ShouldNotReachHere();
     }
+
+#ifdef ASSERT
+    // Line 3: chunk origin
+    const ChunkOrigin origin = chunk->get_origin();
+    lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
+
+    // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
+    //         but were never used.
+    lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
+#endif
+
     p += spec_chunk_size;
     pos ++;
   }
   if (pos > 0) {
-    st->fill_to(22);
-    st->print_raw(line1, pos);
-    st->cr();
-    st->fill_to(22);
-    st->print_raw(line2, pos);
-    st->cr();
+    for (int i = 0; i < NUM_LINES; i ++) {
+      st->fill_to(22);
+      st->print_raw(lines[i], pos);
+      st->cr();
+    }
   }
-  os::free(line1);
-  os::free(line2);
-  os::free(line3);
+  for (int i = 0; i < NUM_LINES; i ++) {
+    os::free(lines[i]);
+  }
 }
 
 
 #ifdef ASSERT
 uintx VirtualSpaceNode::container_count_slow() {
   uintx count = 0;
   Metachunk* chunk = first_chunk();
   Metachunk* invalid_chunk = (Metachunk*) top();
   while (chunk < invalid_chunk ) {
     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
+    do_verify_chunk(chunk);
     // Don't count the chunks on the free lists.  Those are
     // still part of the VirtualSpaceNode but not currently
     // counted.
     if (!chunk->is_tagged_free()) {
       count++;

@@ -649,10 +1021,61 @@
   }
   return count;
 }
 #endif
 
+// Verify all chunks in this list node.
+void VirtualSpaceNode::verify() {
+  DEBUG_ONLY(verify_container_count();)
+  Metachunk* chunk = first_chunk();
+  Metachunk* invalid_chunk = (Metachunk*) top();
+  // Iterate the chunks in this node and verify each chunk.
+  // Also verify that the space is ideally coalesced, i.e. that we did not miss
+  // any coalescing opportunity (there shall be no adjacent free chunks where a
+  // larger free chunk could exist instead).
+  const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
+  const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
+  int num_free_chunks_since_last_med_boundary = -1;
+  int num_free_chunks_since_last_small_boundary = -1;
+  while (chunk < invalid_chunk ) {
+    // verify each chunk.
+    DEBUG_ONLY(do_verify_chunk(chunk);)
+    // Test for missed coalescing opportunities: count the number of free chunks since the last chunk boundary.
+    // Reset the counter when encountering a non-free chunk.
+    if (chunk->get_chunk_type() != HumongousIndex) {
+      if (chunk->is_tagged_free()) {
+        if (is_aligned(chunk, size_small)) {
+          assert(num_free_chunks_since_last_small_boundary <= 1,
+                 "Missed coalescing opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
+          num_free_chunks_since_last_small_boundary = 0;
+        } else if (num_free_chunks_since_last_small_boundary != -1) {
+          num_free_chunks_since_last_small_boundary ++;
+        }
+        if (is_aligned(chunk, size_med)) {
+          assert(num_free_chunks_since_last_med_boundary <= 1,
+                 "Missed coalescing opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
+          num_free_chunks_since_last_med_boundary = 0;
+        } else if (num_free_chunks_since_last_med_boundary != -1) {
+          num_free_chunks_since_last_med_boundary ++;
+        }
+      } else {
+        // Encountering a non-free chunk, reset counters.
+        num_free_chunks_since_last_med_boundary = -1;
+        num_free_chunks_since_last_small_boundary = -1;
+      }
+    } else {
+      // One cannot merge areas with a humongous chunk in the middle. Reset counters.
+      num_free_chunks_since_last_med_boundary = -1;
+      num_free_chunks_since_last_small_boundary = -1;
+    }
+
+    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
+    chunk = (Metachunk*) next;
+  }
+  // Also verify the whole occupancy map
+  DEBUG_ONLY(occupancy_map()->verify(this->bottom(), this->top());)
+}
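
The boundary counters above implement a heuristic: if a whole aligned window of the next-larger chunk size consists of nothing but free chunks, a merge was missed. A minimal standalone sketch of that heuristic (simplified and hypothetical: word offsets instead of addresses, a single boundary size):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct ToyChunk { size_t offset; size_t size; bool free; }; // offsets in words

    // Returns true if a full, aligned window of free chunks was left unmerged.
    static bool missed_merge(const std::vector<ToyChunk>& chunks, size_t boundary) {
      int free_since_boundary = -1; // -1: not synchronized to a boundary yet
      for (const ToyChunk& c : chunks) {
        if (!c.free) { free_since_boundary = -1; continue; } // reset on in-use chunk
        if (c.offset % boundary == 0) {
          if (free_since_boundary > 1) { return true; } // window was all free
          free_since_boundary = 0;
        } else if (free_since_boundary != -1) {
          free_since_boundary++;
        }
      }
      return false;
    }

    int main() {
      // Four free 128-word chunks fill a 512-word window: should have merged.
      std::vector<ToyChunk> bad = { {0, 128, true}, {128, 128, true},
                                    {256, 128, true}, {384, 128, true},
                                    {512, 128, true} };
      assert(missed_merge(bad, 512));
      // An in-use chunk inside the window blocks merging: no violation.
      std::vector<ToyChunk> ok = { {0, 128, true}, {128, 128, false},
                                   {256, 128, true}, {384, 128, true},
                                   {512, 128, true} };
      assert(!missed_merge(ok, 512));
      return 0;
    }
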
+
 // List of VirtualSpaces for metadata allocation.
 class VirtualSpaceList : public CHeapObj<mtClass> {
   friend class VirtualSpaceNode;
 
   enum VirtualSpaceSizes {

@@ -920,12 +1343,10 @@
   Metachunk* get_new_chunk(size_t chunk_word_size);
 
   // Block allocation and deallocation.
   // Allocates a block from the current chunk
   MetaWord* allocate(size_t word_size);
-  // Allocates a block from a small chunk
-  MetaWord* get_small_chunk_and_allocate(size_t word_size);
 
   // Helper for allocations
   MetaWord* allocate_work(size_t word_size);
 
   // Returns a block to the per manager freelist

@@ -1076,10 +1497,13 @@
 
 // VirtualSpaceNode methods
 
 VirtualSpaceNode::~VirtualSpaceNode() {
   _rs.release();
+  if (_occupancy_map != NULL) {
+    delete _occupancy_map;
+  }
 #ifdef ASSERT
   size_t word_size = sizeof(*this) / BytesPerWord;
   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 #endif
 }

@@ -1099,10 +1523,102 @@
 
 // Allocates the chunk from the virtual space only.
 // This interface is also used internally for debugging.  Not all
 // chunks removed here are necessarily used for allocation.
 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
+  // Non-humongous chunks are to be allocated aligned to their chunk
+  // size. So, start addresses of medium chunks are aligned to medium
+  // chunk size, those of small chunks to small chunk size and so
+  // forth. This facilitates free chunk coalescing and reduces
+  // fragmentation. Chunk sizes are spec < small < medium, with each
+  // larger chunk size being a multiple of the next smaller chunk
+  // size.
+  // Because of this alignment, we may need to create a number of padding
+  // chunks. These chunks are created and added to the freelist.
+
+  // The chunk manager to which we will give our padding chunks.
+  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
+
+  // shorthands
+  const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
+  const size_t small_word_size = chunk_manager->small_chunk_word_size();
+  const size_t med_word_size = chunk_manager->medium_chunk_word_size();
+
+  assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
+         chunk_word_size >= med_word_size, "Invalid chunk size requested.");
+
+  // Chunk alignment (in bytes) == chunk size unless humongous.
+  // Humongous chunks are aligned to the smallest chunk size (spec).
+  const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
+                                           spec_word_size : chunk_word_size) * sizeof(MetaWord);
+
+  // Do we have enough space to create the requested chunk plus
+  // any padding chunks needed?
+  MetaWord* const next_aligned =
+    static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
+  if (!is_available((next_aligned - top()) + chunk_word_size)) {
+    return NULL;
+  }
+
+  // Before allocating the requested chunk, allocate padding chunks if necessary.
+  // We only need to do this for small or medium chunks: specialized chunks are the
+  // smallest size, hence always aligned. Humongous chunks are allocated without
+  // special alignment (implicitly, they are still aligned to the smallest chunk size).
+  if (chunk_word_size == med_word_size || chunk_word_size == small_word_size) {
+
+    if (next_aligned > top()) {
+      log_trace(gc, metaspace, freelist)("Coalescation (%s): creating padding chunks between %p and %p...",
+          (is_class() ? "class space " : "metaspace"),
+          top(), next_aligned);
+    }
+
+    // Allocate padding chunks.
+    while (next_aligned > top()) {
+      size_t padding_chunk_word_size = small_word_size;
+      if (!is_aligned(top(), small_word_size * sizeof(MetaWord))) {
+        assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
+        padding_chunk_word_size = spec_word_size;
+      }
+      MetaWord* here = top();
+      assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
+      inc_top(padding_chunk_word_size);
+
+      // Create new padding chunk.
+      ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
+      assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
+
+      Metachunk* const padding_chunk =
+        ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
+      assert(padding_chunk == (Metachunk*)here, "Sanity");
+      DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
+      log_trace(gc, metaspace, freelist)("Coalescation (%s): created padding chunk at "
+                                         PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
+                                         (is_class() ? "class space " : "metaspace"),
+                                         p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
+
+      // Mark chunk start in occupancy map.
+      occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
+
+      // Chunks are born as in-use (see Metachunk ctor). So, before returning
+      // the padding chunk to its chunk manager, mark it as in use (ChunkManager
+      // will assert that).
+      do_update_in_use_info_for_chunk(padding_chunk, true);
+
+      // Return Chunk to freelist.
+      inc_container_count();
+      chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
+      // Please note: ChunkManager::return_single_chunk() may have merged the
+      // padding chunk with neighboring chunks, so it may have vanished by now.
+      // Do not reference the padding chunk beyond this point.
+    }
+
+  } // End: create padding chunks if necessary.
+
+  // Now, top should be aligned correctly.
+  assert_is_aligned(top(), required_chunk_alignment);
+
   // Bottom of the new chunk
   MetaWord* chunk_limit = top();
   assert(chunk_limit != NULL, "Not safe to call this method");
 
   // The virtual spaces are always expanded by the

@@ -1124,11 +1640,24 @@
 
   // Take the space  (bump top on the current virtual space).
   inc_top(chunk_word_size);
 
   // Initialize the chunk
-  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
+  ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
+  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
+  assert(result == (Metachunk*)chunk_limit, "Sanity");
+  occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
+  do_update_in_use_info_for_chunk(result, true);
+
+  inc_container_count();
+
+  DEBUG_ONLY(chunk_manager->locked_verify());
+  DEBUG_ONLY(this->verify());
+  DEBUG_ONLY(do_verify_chunk(result));
+
+  DEBUG_ONLY(result->inc_use_count();)
+
   return result;
 }
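
A minimal standalone sketch (simplified, hypothetical names; non-class word sizes assumed) of the padding step above: starting from a spec-aligned top, emit small-sized padding chunks where top is small-aligned and spec-sized ones otherwise, until the requested alignment is reached:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
      const size_t spec = 128, small = 512, med = 8 * 1024; // word sizes (non-class)
      size_t top = 3 * spec;               // example: spec- but not small-aligned
      const size_t target_alignment = med; // a medium chunk was requested
      std::vector<size_t> padding;         // word sizes of emitted padding chunks

      while (top % target_alignment != 0) {
        // Prefer a small padding chunk; fall back to spec if unaligned.
        const size_t pad = (top % small == 0) ? small : spec;
        assert(top % spec == 0); // top is always at least spec-aligned
        padding.push_back(pad);
        top += pad;
      }
      // From word 384: one spec chunk (to 512), then 15 small chunks (to 8192).
      assert(padding.size() == 16);
      assert(top % med == 0);
      return 0;
    }
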
 
 
 // Expand the virtual space (commit more of the reserved space)

@@ -1143,21 +1672,26 @@
   }
 
   size_t commit = MIN2(preferred_bytes, uncommitted);
   bool result = virtual_space()->expand_by(commit, false);
 
+  if (result) {
+    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
+              (is_class() ? "class" : "non-class"), commit);
+  } else {
+    log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
+              (is_class() ? "class" : "non-class"), commit);
+  }
+
   assert(result, "Failed to commit memory");
 
   return result;
 }
 
 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
   Metachunk* result = take_from_committed(chunk_word_size);
-  if (result != NULL) {
-    inc_container_count();
-  }
   return result;
 }
 
 bool VirtualSpaceNode::initialize() {
 

@@ -1193,10 +1727,14 @@
            "Reserved size was not set properly " SIZE_FORMAT
            " != " SIZE_FORMAT, reserved()->word_size(),
            _rs.size() / BytesPerWord);
   }
 
+  // Initialize Occupancy Map.
+  const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
+  _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
+
   return result;
 }
 
 void VirtualSpaceNode::print_on(outputStream* st) const {
   size_t used = used_words_in_vs();

@@ -1277,10 +1815,143 @@
 
   // Chunk has been removed from the chunks free list, update counters.
   account_for_removed_chunk(chunk);
 }
 
+bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  assert(chunk != NULL, "invalid chunk pointer");
+  // Check for valid coalescing combinations.
+  assert((chunk->get_chunk_type() == SpecializedIndex &&
+          (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
+         (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
+        "Invalid chunk coalescing combination.");
+
+  const size_t target_chunk_word_size =
+    get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
+
+  MetaWord* const p_coalescation_start =
+    (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
+  MetaWord* const p_coalescation_end =
+    p_coalescation_start + target_chunk_word_size;
+
+  // We need the VirtualSpaceNode containing this chunk and its occupancy map.
+  VirtualSpaceNode* const vsn = chunk->container();
+  OccupancyMap* const ocmap = vsn->occupancy_map();
+
+  // The potential coalescing range shall be completely contained by the
+  // committed range of the virtual space node.
+  if (p_coalescation_start < vsn->bottom() || p_coalescation_end > vsn->top()) {
+    return false;
+  }
+
+  // Only attempt to coalesce if a chunk starts at the start of the potential
+  // coalescing range and a chunk ends at its end. If that is not the case -
+  // so, if a chunk straddles either the start or the end of the range -
+  // we cannot coalesce. Note that this should only happen with
+  // humongous chunks.
+  if (!ocmap->chunk_starts_at_address(p_coalescation_start)) {
+    return false;
+  }
+
+  // (A chunk ends at the coalescing range end either if this is the
+  // end of the used area or if a new chunk starts right away.)
+  if (p_coalescation_end < vsn->top()) {
+    if (!ocmap->chunk_starts_at_address(p_coalescation_end)) {
+      return false;
+    }
+  }
+
+  // Now check whether there are still live chunks in the coalescing area.
+  if (ocmap->is_region_in_use(p_coalescation_start, target_chunk_word_size)) {
+    return false;
+  }
+
+  // Success! Remove all chunks in this region...
+  log_trace(gc, metaspace, freelist)("Coalescation (%s): coalescing chunks in area [%p-%p)...",
+    (is_class() ? "class space" : "metaspace"),
+    p_coalescation_start, p_coalescation_end);
+
+  const int num_chunks_removed =
+    remove_chunks_in_area(p_coalescation_start, target_chunk_word_size);
+
+  // ... and create a single new bigger chunk.
+  Metachunk* const p_new_chunk =
+      ::new (p_coalescation_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
+  assert(p_new_chunk == (Metachunk*)p_coalescation_start, "Sanity");
+  DEBUG_ONLY(p_new_chunk->set_origin(origin_coalescation);)
+
+  log_trace(gc, metaspace, freelist)("Coalescation (%s): created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
+    (is_class() ? "class space" : "metaspace"),
+    p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
+
+  // Fix the occupancy map: remove the start bits of the merged chunks and set the new chunk's start bit.
+  ocmap->wipe_chunk_start_bits_in_region(p_coalescation_start, target_chunk_word_size);
+  ocmap->set_chunk_starts_at_address(p_coalescation_start, true);
+
+  // Mark chunk as free. Note: it is not necessary to update the in-use layer
+  // of the occupancy map, because the old chunks were also free, so nothing
+  // should have changed.
+  p_new_chunk->set_is_tagged_free(true);
+
+  // Add new chunk to its freelist.
+  ChunkList* const list = free_chunks(target_chunk_type);
+  list->return_chunk_at_head(p_new_chunk);
+
+  // Adjust ChunkManager::_free_chunks_count (_free_chunks_total should not
+  // have changed, because the total size of the free space is the same).
+  _free_chunks_count -= num_chunks_removed;
+  _free_chunks_count ++;
+
+  // VirtualSpaceNode::container_count does not have to be modified:
+  // it means "number of active (non-free) chunks", so coalescing
+  // free chunks does not affect that count.
+
+  // At the end of a coalescing operation, run verification tests.
+  DEBUG_ONLY(this->locked_verify());
+  DEBUG_ONLY(vsn->verify());
+
+  return true;
+}
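
A minimal standalone sketch (simplified, hypothetical values; 8-byte words assumed) of the merge-region computation above: the candidate region is the target-chunk-sized, target-aligned area enclosing the returned chunk, whose boundaries and in-use state are then checked against the occupancy map:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      const size_t target_words = 512;              // e.g. a small chunk (non-class)
      const size_t target_bytes = target_words * 8; // assuming 8-byte MetaWords
      // A hypothetical chunk address: the third spec chunk inside a small area.
      const uintptr_t chunk_addr = 0x10000 + 3 * 128 * 8;

      // align_down to the target chunk size gives the region start; the
      // region end is exactly one target chunk later.
      const uintptr_t region_start = chunk_addr & ~(uintptr_t)(target_bytes - 1);
      const uintptr_t region_end = region_start + target_bytes;

      assert(region_start == 0x10000); // the enclosing small-aligned area
      assert(region_end == region_start + target_bytes);
      assert(chunk_addr >= region_start && chunk_addr < region_end);
      return 0;
    }
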
+
+// Remove all chunks in the given area - the chunks are supposed to be free -
+// from their corresponding freelists. Mark them as invalid.
+// - This does not correct the occupancy map.
+// - This does not adjust the counters in ChunkManager.
+// - This does not adjust the container count in the containing VirtualSpaceNode.
+// Returns the number of chunks removed.
+int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
+  assert(p != NULL && word_size > 0, "Invalid range.");
+  const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
+  assert_is_aligned(word_size, smallest_chunk_size);
+
+  Metachunk* const start = (Metachunk*) p;
+  const Metachunk* const end = (Metachunk*)(p + word_size);
+  Metachunk* cur = start;
+  int num_removed = 0;
+  while (cur < end) {
+    Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
+    DEBUG_ONLY(do_verify_chunk(cur));
+    assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
+    assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
+    log_trace(gc, metaspace, freelist)("Coalescation (%s): removing chunk %p, size " SIZE_FORMAT_HEX ".",
+      (is_class() ? "class space" : "metaspace"),
+      cur, cur->word_size() * sizeof(MetaWord));
+    DEBUG_ONLY(cur->remove_sentinel();)
+    // Note: cannot call ChunkManager::remove_chunk, because that
+    // modifies the counters in ChunkManager, which we do not want. So
+    // we call remove_chunk on the freelist directly (see also the
+    // splitting function which does the same).
+    ChunkList* const list = free_chunks(list_index(cur->word_size()));
+    list->remove_chunk(cur);
+    num_removed ++;
+    cur = next;
+  }
+  return num_removed;
+}
+
 // Walk the list of VirtualSpaceNodes and delete
 // nodes with a 0 container_count.  Remove Metachunks in
 // the node from their respective freelists.
 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");

@@ -1295,10 +1966,12 @@
     DEBUG_ONLY(vsl->verify_container_count();)
     next_vsl = vsl->next();
     // Don't free the current virtual space since it will likely
     // be needed soon.
     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
+      log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
+                                         ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
       // Unlink it from the list
       if (prev_vsl == vsl) {
         // This is the case of the current node being the first node.
         assert(vsl == virtual_space_list(), "Expected to be the first node");
         set_virtual_space_list(vsl->next());

@@ -1356,18 +2029,26 @@
   vsn->retire(cm);
 }
 
 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
   DEBUG_ONLY(verify_container_count();)
+  assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
     ChunkIndex index = (ChunkIndex)i;
     size_t chunk_size = chunk_manager->size_by_index(index);
 
     while (free_words_in_vs() >= chunk_size) {
       Metachunk* chunk = get_chunk_vs(chunk_size);
-      assert(chunk != NULL, "allocation should have been successful");
-
+      // Chunk will be allocated aligned, so allocation may require
+      // additional padding chunks. That may cause the above allocation to
+      // fail. Just ignore the failed allocation and continue with the
+      // next smaller chunk size. As the VirtualSpaceNode committed
+      // size should be a multiple of the smallest chunk size, we
+      // should always be able to fill the VirtualSpace completely.
+      if (chunk == NULL) {
+        break;
+      }
       chunk_manager->return_single_chunk(index, chunk);
     }
     DEBUG_ONLY(verify_container_count();)
   }
   assert(free_words_in_vs() == 0, "should be empty now");

@@ -1392,11 +2073,11 @@
                                    _reserved_words(0),
                                    _committed_words(0),
                                    _virtual_space_count(0) {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
-  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
+  VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
   bool succeeded = class_entry->initialize();
   if (succeeded) {
     link_vs(class_entry);
   }
 }

@@ -1424,11 +2105,11 @@
   // Reserve the space
   size_t vs_byte_size = vs_word_size * BytesPerWord;
   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
 
   // Allocate the meta virtual space and initialize it.
-  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
+  VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
   if (!new_entry->initialize()) {
     delete new_entry;
     return false;
   } else {
     assert(new_entry->reserved_words() == vs_word_size,

@@ -1481,28 +2162,38 @@
 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
   assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
   assert(min_words <= preferred_words, "Invalid arguments");
 
+  const char* const class_or_not = (is_class() ? "class" : "non-class");
+
   if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
+    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
+              class_or_not);
     return  false;
   }
 
   size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
   if (allowed_expansion_words < min_words) {
+    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
+              class_or_not);
     return false;
   }
 
   size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
 
   // Commit more memory from the the current virtual space.
   bool vs_expanded = expand_node_by(current_virtual_space(),
                                     min_words,
                                     max_expansion_words);
   if (vs_expanded) {
+     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
+               class_or_not);
     return true;
   }
+  log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
+            class_or_not);
   retire_current_virtual_space();
 
   // Get another virtual space.
   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

@@ -1522,10 +2213,28 @@
   }
 
   return false;
 }
 
+// Given a chunk size, calculate the largest possible amount of padding
+// space which could be required when allocating a chunk of that size.
+static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
+  const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
+  if (chunk_type != HumongousIndex) {
+    // Normal, non-humongous chunks are allocated at chunk size
+    // boundaries, so the largest padding space required would be that
+    // minus the smallest chunk size.
+    const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
+    return chunk_word_size - smallest_chunk_size;
+  } else {
+    // Humongous chunks are allocated at smallest-chunk-size
+    // boundaries, so no padding is required.
+    return 0;
+  }
+}
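
A quick standalone check (simplified; non-class word sizes assumed) of the bound computed above: when allocating a medium chunk, the worst case is a top that sits exactly one spec chunk past a medium boundary, requiring medium-size minus spec-size words of padding:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t spec = 128, med = 8 * 1024; // word sizes (non-class)
      const size_t worst_case = med - spec;    // bound from the function above
      size_t top = med + spec;                 // one spec past a medium boundary
      size_t padding_needed = (2 * med) - top; // words to the next boundary
      assert(padding_needed == worst_case);
      return 0;
    }
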
+
+
 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
 
   // Allocate a chunk out of the current virtual space.
   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
 

@@ -1534,11 +2243,15 @@
   }
 
   // The expand amount is currently only determined by the requested sizes
   // and not how much committed memory is left in the current virtual space.
 
-  size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
+  // We must have enough space for the requested size and any
+  // additional required padding chunks.
+  const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
+
+  size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
   if (min_word_size >= preferred_word_size) {
     // Can happen when humongous chunks are allocated.
     preferred_word_size = min_word_size;
   }

@@ -1674,17 +2387,21 @@
 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
   // Check if the compressed class space is full.
   if (is_class && Metaspace::using_class_space()) {
     size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
     if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
+      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
+                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
       return false;
     }
   }
 
   // Check if the user has imposed a limit on the metaspace memory.
   size_t committed_bytes = MetaspaceAux::committed_bytes();
   if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
+    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
+              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
     return false;
   }
 
   return true;
 }

@@ -1698,10 +2415,13 @@
          capacity_until_gc, committed_bytes);
 
   size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
   size_t left_until_GC = capacity_until_gc - committed_bytes;
   size_t left_to_commit = MIN2(left_until_GC, left_until_max);
+  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
+            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".",
+            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
 
   return left_to_commit / BytesPerWord;
 }
 
 void MetaspaceGC::compute_new_size() {

@@ -1946,10 +2666,21 @@
 }
 
 void ChunkManager::locked_verify() {
   locked_verify_free_chunks_count();
   locked_verify_free_chunks_total();
+  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
+    ChunkList* list = free_chunks(i);
+    if (list != NULL) {
+      Metachunk* chunk = list->head();
+      while (chunk) {
+        DEBUG_ONLY(do_verify_chunk(chunk);)
+        assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
+        chunk = chunk->next();
+      }
+    }
+  }
 }
 
 void ChunkManager::locked_print_free_chunks(outputStream* st) {
   assert_lock_strong(SpaceManager::expand_lock());
   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,

@@ -2017,10 +2748,139 @@
     ChunkList* free_list = find_free_chunks_list(word_size);
     assert(free_list != NULL, "Sanity check");
 
     chunk = free_list->head();
 
+    // Coalescing: split a larger chunk into smaller chunks if no chunk
+    // of the requested size is available, only larger ones. This is the
+    // counterpart of the coalescing-upon-chunk-return.
+    if (chunk == NULL) {
+
+      ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
+
+      // Is there a larger chunk we could split?
+      Metachunk* larger_chunk = NULL;
+      ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
+      while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
+        larger_chunk = free_chunks(larger_chunk_index)->head();
+        if (larger_chunk == NULL) {
+          larger_chunk_index = next_chunk_index(larger_chunk_index);
+        }
+      }
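+      // If no larger free chunk exists either, chunk stays NULL and we
+      // return NULL further down.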
+
+      if (larger_chunk != NULL) {
+        assert(larger_chunk->word_size() > word_size, "Sanity");
+        assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
+
+        // We found a larger chunk. Let's split it up:
+        // - remove old chunk
+        // - in its place, create new smaller chunks, with at least one chunk
+        //   being of target size, the others sized as large as possible. This
+        //   is to make sure the resulting chunks are "as coalesced as possible"
+        //   (similar to VirtualSpaceNode::retire()).
+        // Note: during this operation both ChunkManager and VirtualSpaceNode
+        //  are temporarily invalid, so be careful with asserts.
+
+        log_trace(gc, metaspace, freelist)("Splitting (%s): chunk " PTR_FORMAT
+           ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
+          (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
+          chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
+
+        MetaWord* const region_start = (MetaWord*)larger_chunk;
+        const size_t region_word_len = larger_chunk->word_size();
+        MetaWord* const region_end = region_start + region_word_len;
+        VirtualSpaceNode* const vsn = larger_chunk->container();
+        OccupancyMap* const ocmap = vsn->occupancy_map();
+
+        // Remove old chunk.
+        free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
+        DEBUG_ONLY(larger_chunk->remove_sentinel();)
+
+        // In debug builds, prevent further access to the removed chunk and wipe the area.
+        DEBUG_ONLY(larger_chunk = NULL);
+        DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
+
+        // In its place create first the target chunk...
+        MetaWord* p = region_start;
+        chunk = ::new (p) Metachunk(target_chunk_index, is_class(), word_size, vsn);
+        assert(chunk == (Metachunk*)p, "Sanity");
+        DEBUG_ONLY(chunk->set_origin(origin_split);)
+
+        // Note: we do not need to mark its start in the occupancy map
+        // because it coincides with the old chunk start.
+
+        // We are about to return it, so mark it in use and update vsn count.
+        do_update_in_use_info_for_chunk(chunk, true);
+        account_for_removed_chunk(chunk);
+        vsn->inc_container_count();
+
+        // This chunk should now be valid and can be verified.
+        DEBUG_ONLY(do_verify_chunk(chunk));
+
+        // In the remaining space create the remainder chunks.
+        p += chunk->word_size();
+        assert(p < region_end, "Sanity");
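+        // (The remainder is never empty: the chunk we split was strictly
+        // larger than the target chunk size.)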
+
+        while (p < region_end) {
+
+          // Find the largest chunk size which fits the alignment requirements at address p.
+          ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
+          size_t this_chunk_word_size = 0;
+          for (;;) {
+            this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
+            if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
+              break;
+            } else {
+              this_chunk_index = prev_chunk_index(this_chunk_index);
+              assert(this_chunk_index >= target_chunk_index, "Sanity");
+            }
+          }
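+          // Note: the loop above always terminates: chunk sizes are
+          // multiples of each other, and p starts out aligned to the
+          // original (larger) chunk size and only ever advances in
+          // multiples of the target size, so at the latest the target
+          // chunk size fits.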
+
+          assert(this_chunk_word_size >= word_size, "Sanity");
+          assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
+          assert(p + this_chunk_word_size <= region_end, "Sanity");
+
+          // Create splitting chunk.
+          Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
+          assert(this_chunk == (Metachunk*)p, "Sanity");
+          DEBUG_ONLY(this_chunk->set_origin(origin_split);)
+          ocmap->set_chunk_starts_at_address(p, true);
+          do_update_in_use_info_for_chunk(this_chunk, false);
+
+          // This chunk should be valid and can be verified.
+          DEBUG_ONLY(do_verify_chunk(this_chunk));
+
+          // Return this chunk to freelist and correct counter.
+          free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
+          _free_chunks_count++;
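+          // Note: the free word total needs no adjustment here;
+          // account_for_removed_chunk() above already deducted the words of
+          // the chunk being handed out, and the remainder chunks' words
+          // never left the total.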
+
+          log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
+            SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
+            p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
+            p2i(region_start), p2i(region_end));
+
+          p += this_chunk_word_size;
+
+        }
+
+        // ChunkManager and VirtualSpaceNode should be valid at this point.
+        DEBUG_ONLY(this->locked_verify());
+
+        // This will also walk the chunks in the address range and
+        // verify that we left no "holes".
+        DEBUG_ONLY(vsn->verify());
+        DEBUG_ONLY(chunk->container()->verify_container_count());
+
+        log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get (%s): Returning chunk "
+          PTR_FORMAT ", word size " SIZE_FORMAT_HEX " (%s).",
+          (is_class() ? "class space" : "metaspace"), p2i(chunk), chunk->word_size(), chunk_size_name(chunk->get_chunk_type()));
+
+        DEBUG_ONLY(chunk->inc_use_count();)
+        return chunk;
+      }
+
+    }
+
     if (chunk == NULL) {
       return NULL;
     }
 
     // Remove the chunk as the head of the list.

@@ -2046,14 +2906,16 @@
   chunk->set_next(NULL);
   chunk->set_prev(NULL);
 
-  // Chunk is no longer on any freelist. Setting to false make container_count_slow()
-  // work.
+  // Chunk is no longer on any freelist. Marking it in-use (is_tagged_free
+  // becomes false) makes container_count_slow() work.
-  chunk->set_is_tagged_free(false);
+  do_update_in_use_info_for_chunk(chunk, true);
   chunk->container()->inc_container_count();
+  DEBUG_ONLY(chunk->container()->verify_container_count());
 
-  slow_locked_verify();
+  DEBUG_ONLY(slow_locked_verify());
+  DEBUG_ONLY(chunk->inc_use_count();)
   return chunk;
 }
 
 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
   assert_lock_strong(SpaceManager::expand_lock());

@@ -2087,19 +2949,21 @@
   return chunk;
 }
 
 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
   assert_lock_strong(SpaceManager::expand_lock());
+  assert(chunk != NULL, "Expected chunk.");
+  DEBUG_ONLY(do_verify_chunk(chunk);)
+  assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
-  assert(chunk != NULL, "Expected chunk.");
   assert(chunk->container() != NULL, "Container should have been set.");
   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
   index_bounds_check(index);
 
   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
-  NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
+  DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
 
   if (index != HumongousIndex) {
     // Return non-humongous chunk to freelist.
     ChunkList* list = free_chunks(index);
     assert(list->size() == chunk->word_size(), "Wrong chunk type.");

@@ -2114,15 +2978,28 @@
     _humongous_dictionary.return_chunk(chunk);
     log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
         chunk_size_name(index), p2i(chunk), chunk->word_size());
   }
   chunk->container()->dec_container_count();
-  chunk->set_is_tagged_free(true);
+  do_update_in_use_info_for_chunk(chunk, false);
 
   // Chunk has been added; update counters.
   account_for_added_chunk(chunk);
 
+  // Attempt to coalesce the returned chunk with its neighboring chunks:
+  // if this chunk is small or specialized, attempt to coalesce up to a medium chunk.
+  if (index == SmallIndex || index == SpecializedIndex) {
+    if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
+      // That failed. If this is a specialized chunk, we may still be able
+      // to form a small chunk.
+      if (index == SpecializedIndex) {
+        if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
+          // Give up.
+        }
+      }
+    }
+  }
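+  // (Coalescing can only succeed if all chunks in the enclosing
+  // target-chunk-sized area happen to be free as well; the occupancy
+  // map's in-use information is what is used to check this.)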
+
 }
 
 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
   index_bounds_check(index);
   if (chunks == NULL) {

@@ -2587,10 +3464,15 @@
          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
 
   MutexLockerEx fcl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
 
+  assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
+         "sum_count_in_chunks_in_use() " SIZE_FORMAT
+         " allocated_chunks_count() " SIZE_FORMAT,
+         sum_count_in_chunks_in_use(), allocated_chunks_count());
+
   chunk_manager()->slow_locked_verify();
 
   dec_total_from_size_metrics();
 
   Log(gc, metaspace, freelist) log;

@@ -2710,49 +3592,10 @@
   }
 
   return next;
 }
 
-/*
- * The policy is to allocate up to _small_chunk_limit small chunks
- * after which only medium chunks are allocated.  This is done to
- * reduce fragmentation.  In some cases, this can result in a lot
- * of small chunks being allocated to the point where it's not
- * possible to expand.  If this happens, there may be no medium chunks
- * available and OOME would be thrown.  Instead of doing that,
- * if the allocation request size fits in a small chunk, an attempt
- * will be made to allocate a small chunk.
- */
-MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
-  size_t raw_word_size = get_allocation_word_size(word_size);
-
-  if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
-    return NULL;
-  }
-
-  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
-  MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
-
-  Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
-
-  MetaWord* mem = NULL;
-
-  if (chunk != NULL) {
-    // Add chunk to the in-use chunk list and do an allocation from it.
-    // Add to this manager's list of chunks in use.
-    add_chunk(chunk, false);
-    mem = chunk->allocate(raw_word_size);
-
-    inc_used_metrics(raw_word_size);
-
-    // Track metaspace memory usage statistic.
-    track_metaspace_memory_usage();
-  }
-
-  return mem;
-}
-
 MetaWord* SpaceManager::allocate(size_t word_size) {
   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
   size_t raw_word_size = get_allocation_word_size(word_size);
   BlockFreelist* fl =  block_freelists();
   MetaWord* p = NULL;

@@ -2806,12 +3649,12 @@
   // being in the dictionary alters a chunk.
   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
       Metachunk* curr = chunks_in_use(i);
       while (curr != NULL) {
-        curr->verify();
-        verify_chunk_size(curr);
+        DEBUG_ONLY(do_verify_chunk(curr);)
+        assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
         curr = curr->next();
       }
     }
   }
 }

@@ -3648,11 +4491,11 @@
   // The reserved space size may be bigger because of alignment, esp with UseLargePages
   assert(rs.size() >= CompressedClassSpaceSize,
          SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
   assert(using_class_space(), "Must be using class space");
   _class_space_list = new VirtualSpaceList(rs);
-  _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
+  _chunk_manager_class = new ChunkManager(true/*is_class*/);
 
   if (!_class_space_list->initialization_succeeded()) {
     vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
   }
 }

@@ -3755,11 +4598,11 @@
   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
   word_size = align_up(word_size, Metaspace::reserve_alignment_words());
 
   // Initialize the list of virtual spaces.
   _space_list = new VirtualSpaceList(word_size);
-  _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
+  _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
 
   if (!_space_list->initialization_succeeded()) {
     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
   }
 

@@ -3955,23 +4798,12 @@
       result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
     }
   }
 
   if (result == NULL) {
-    SpaceManager* sm;
-    if (is_class_space_allocation(mdtype)) {
-      sm = loader_data->metaspace_non_null()->class_vsm();
-    } else {
-      sm = loader_data->metaspace_non_null()->vsm();
-    }
-
-    result = sm->get_small_chunk_and_allocate(word_size);
-
-    if (result == NULL) {
-      report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
-    }
-  }
+    report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
+  }
 
   // Zero initialize.
   Copy::fill_to_words((HeapWord*)result, word_size, 0);
 
   return result;

@@ -4097,10 +4929,28 @@
     out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
     class_vsm()->dump(out);
   }
 }
 
+#ifdef ASSERT
+static void do_verify_chunk(Metachunk* chunk) {
+  guarantee(chunk != NULL, "Sanity");
+  // Verify the chunk itself; then verify that it is consistent with the
+  // occupancy map of its containing node.
+  chunk->verify();
+  VirtualSpaceNode* const vsn = chunk->container();
+  OccupancyMap* const ocmap = vsn->occupancy_map();
+  ocmap->verify_for_chunk(chunk);
+}
+#endif
+
+static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
+  chunk->set_is_tagged_free(!inuse);
+  OccupancyMap* const ocmap = chunk->container()->occupancy_map();
+  ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
+}
+
 /////////////// Unit tests ///////////////
 
 #ifndef PRODUCT
 
 class TestMetaspaceAuxTest : AllStatic {

@@ -4187,20 +5037,20 @@
     // The chunk sizes must be multiples of each other, or this will fail
     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
 
     { // No committed memory in VSN
-      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
-      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      ChunkManager cm(false/*is_class*/);
+      VirtualSpaceNode vsn(false/*is_class*/, vsn_test_size_bytes);
       vsn.initialize();
       vsn.retire(&cm);
       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
     }
 
     { // All of VSN is committed, half is used by chunks
-      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
-      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      ChunkManager cm(false/*is_class*/);
+      VirtualSpaceNode vsn(false/*is_class*/, vsn_test_size_bytes);
       vsn.initialize();
       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
       vsn.get_chunk_vs(MediumChunk);
       vsn.get_chunk_vs(MediumChunk);
       vsn.retire(&cm);

@@ -4210,12 +5060,12 @@
 
     const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
     // This doesn't work for systems with vm_page_size >= 16K.
     if (page_chunks < MediumChunk) {
       // 4 pages of VSN is committed, some is used by chunks
-      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
-      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      ChunkManager cm(false/*is_class*/);
+      VirtualSpaceNode vsn(false/*is_class*/, vsn_test_size_bytes);
 
       vsn.initialize();
       vsn.expand_by(page_chunks, page_chunks);
       vsn.get_chunk_vs(SmallChunk);
       vsn.get_chunk_vs(SpecializedChunk);

@@ -4231,12 +5081,12 @@
       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
     }
 
     { // Half of VSN is committed, a humongous chunk is used
-      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
-      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      ChunkManager cm(false/*is_class*/);
+      VirtualSpaceNode vsn(false/*is_class*/, vsn_test_size_bytes);
       vsn.initialize();
       vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
       vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
       vsn.retire(&cm);
 

@@ -4263,11 +5113,11 @@
          "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
          (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
 
   static void test_is_available_positive() {
     // Reserve some memory.
-    VirtualSpaceNode vsn(os::vm_allocation_granularity());
+    VirtualSpaceNode vsn(false/*is_class*/, os::vm_allocation_granularity());
     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
 
     // Commit some memory.
     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);

@@ -4281,11 +5131,11 @@
     assert_is_available_positive(expand_word_size);
   }
 
   static void test_is_available_negative() {
     // Reserve some memory.
-    VirtualSpaceNode vsn(os::vm_allocation_granularity());
+    VirtualSpaceNode vsn(false/*is_class*/, os::vm_allocation_granularity());
     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
 
     // Commit some memory.
     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);

@@ -4296,11 +5146,11 @@
     assert_is_available_negative(two_times_commit_word_size);
   }
 
   static void test_is_available_overflow() {
     // Reserve some memory.
-    VirtualSpaceNode vsn(os::vm_allocation_granularity());
+    VirtualSpaceNode vsn(false/*is_class*/, os::vm_allocation_granularity());
     assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
 
     // Commit some memory.
     size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
     bool expanded = vsn.expand_by(commit_word_size, commit_word_size);

@@ -4321,19 +5171,14 @@
     TestVirtualSpaceNodeTest::test_is_available_negative();
     TestVirtualSpaceNodeTest::test_is_available_overflow();
   }
 };
 
-void TestVirtualSpaceNode_test() {
-  TestVirtualSpaceNodeTest::test();
-  TestVirtualSpaceNodeTest::test_is_available();
-}
-
 // The following test is placed here instead of a gtest / unittest file
 // because the ChunkManager class is only available in this file.
 void ChunkManager_test_list_index() {
-  ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
+  ChunkManager manager(true/*is_class*/);
 
   // Test previous bug where a query for a humongous class metachunk,
   // incorrectly matched the non-class medium metachunk size.
   {
     assert(MediumChunk > ClassMediumChunk, "Precondition for test");

@@ -4366,270 +5211,10 @@
 
 #endif // !PRODUCT
 
 #ifdef ASSERT
 
-// ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
-// returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
-// content.
-class ChunkManagerReturnTestImpl : public CHeapObj<mtClass> {
-
-  VirtualSpaceNode _vsn;
-  ChunkManager _cm;
-
-  // The expected content of the chunk manager.
-  unsigned _chunks_in_chunkmanager;
-  size_t _words_in_chunkmanager;
-
-  // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
-  static const int num_chunks = 256;
-  Metachunk* _pool[num_chunks];
-
-  // Helper, return a random position into the chunk pool.
-  static int get_random_position() {
-    return os::random() % num_chunks;
-  }
-
-  // Asserts that ChunkManager counters match expectations.
-  void assert_counters() {
-    assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
-    assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
-    assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
-  }
-
-  // Get a random chunk size. Equal chance to get spec/med/small chunk size or
-  // a humongous chunk size. The latter itself is random in the range of [med+spec..4*med).
-  size_t get_random_chunk_size() {
-    const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
-    const int rand = os::random() % 4;
-    if (rand < 3) {
-      return sizes[rand];
-    } else {
-      // Note: this affects the max. size of space (see _vsn initialization in ctor).
-      return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
-    }
-  }
-
-  // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
-  // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
-  int next_matching_chunk(int start, bool is_free) const {
-    assert(start >= 0 && start < num_chunks, "invalid parameter");
-    int pos = start;
-    do {
-      if (++pos == num_chunks) {
-        pos = 0;
-      }
-      if (_pool[pos]->is_tagged_free() == is_free) {
-        return pos;
-      }
-    } while (pos != start);
-    return -1;
-  }
-
-  // A structure to keep information about a chunk list including which
-  // chunks are part of this list. This is needed to keep information about a chunk list
-  // we will to return to the ChunkManager, because the original list will be destroyed.
-  struct AChunkList {
-    Metachunk* head;
-    Metachunk* all[num_chunks];
-    size_t size;
-    int num;
-    ChunkIndex index;
-  };
-
-  // Assemble, from the in-use chunks (not in the chunk manager) in the pool,
-  // a random chunk list of max. length <list_size> of chunks with the same
-  // ChunkIndex (chunk size).
-  // Returns false if list cannot be assembled. List is returned in the <out>
-  // structure. Returned list may be smaller than <list_size>.
-  bool assemble_random_chunklist(AChunkList* out, int list_size) {
-    // Choose a random in-use chunk from the pool...
-    const int headpos = next_matching_chunk(get_random_position(), false);
-    if (headpos == -1) {
-      return false;
-    }
-    Metachunk* const head = _pool[headpos];
-    out->all[0] = head;
-    assert(head->is_tagged_free() == false, "Chunk state mismatch");
-    // ..then go from there, chain it up with up to list_size - 1 number of other
-    // in-use chunks of the same index.
-    const ChunkIndex index = _cm.list_index(head->word_size());
-    int num_added = 1;
-    size_t size_added = head->word_size();
-    int pos = headpos;
-    Metachunk* tail = head;
-    do {
-      pos = next_matching_chunk(pos, false);
-      if (pos != headpos) {
-        Metachunk* c = _pool[pos];
-        assert(c->is_tagged_free() == false, "Chunk state mismatch");
-        if (index == _cm.list_index(c->word_size())) {
-          tail->set_next(c);
-          c->set_prev(tail);
-          tail = c;
-          out->all[num_added] = c;
-          num_added ++;
-          size_added += c->word_size();
-        }
-      }
-    } while (num_added < list_size && pos != headpos);
-    out->head = head;
-    out->index = index;
-    out->size = size_added;
-    out->num = num_added;
-    return true;
-  }
-
-  // Take a single random chunk from the ChunkManager.
-  bool take_single_random_chunk_from_chunkmanager() {
-    assert_counters();
-    _cm.locked_verify();
-    int pos = next_matching_chunk(get_random_position(), true);
-    if (pos == -1) {
-      return false;
-    }
-    Metachunk* c = _pool[pos];
-    assert(c->is_tagged_free(), "Chunk state mismatch");
-    // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
-    // ChunkManager::free_chunks_get() with this chunk's word size. We really want
-    // to exercise ChunkManager::free_chunks_get() because that one gets called for
-    // normal chunk allocation.
-    Metachunk* c2 = _cm.free_chunks_get(c->word_size());
-    assert(c2 != NULL, "Unexpected.");
-    assert(!c2->is_tagged_free(), "Chunk state mismatch");
-    assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
-    _chunks_in_chunkmanager --;
-    _words_in_chunkmanager -= c->word_size();
-    assert_counters();
-    _cm.locked_verify();
-    return true;
-  }
-
-  // Returns a single random chunk to the chunk manager. Returns false if that
-  // was not possible (all chunks are already in the chunk manager).
-  bool return_single_random_chunk_to_chunkmanager() {
-    assert_counters();
-    _cm.locked_verify();
-    int pos = next_matching_chunk(get_random_position(), false);
-    if (pos == -1) {
-      return false;
-    }
-    Metachunk* c = _pool[pos];
-    assert(c->is_tagged_free() == false, "wrong chunk information");
-    _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
-    _chunks_in_chunkmanager ++;
-    _words_in_chunkmanager += c->word_size();
-    assert(c->is_tagged_free() == true, "wrong chunk information");
-    assert_counters();
-    _cm.locked_verify();
-    return true;
-  }
-
-  // Return a random chunk list to the chunk manager. Returns the length of the
-  // returned list.
-  int return_random_chunk_list_to_chunkmanager(int list_size) {
-    assert_counters();
-    _cm.locked_verify();
-    AChunkList aChunkList;
-    if (!assemble_random_chunklist(&aChunkList, list_size)) {
-      return 0;
-    }
-    // Before returning chunks are returned, they should be tagged in use.
-    for (int i = 0; i < aChunkList.num; i ++) {
-      assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
-    }
-    _cm.return_chunk_list(aChunkList.index, aChunkList.head);
-    _chunks_in_chunkmanager += aChunkList.num;
-    _words_in_chunkmanager += aChunkList.size;
-    // After all chunks are returned, check that they are now tagged free.
-    for (int i = 0; i < aChunkList.num; i ++) {
-      assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
-    }
-    assert_counters();
-    _cm.locked_verify();
-    return aChunkList.num;
-  }
-
-public:
-
-  ChunkManagerReturnTestImpl()
-    : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
-    , _cm(SpecializedChunk, SmallChunk, MediumChunk)
-    , _chunks_in_chunkmanager(0)
-    , _words_in_chunkmanager(0)
-  {
-    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
-    // Allocate virtual space and allocate random chunks. Keep these chunks in the _pool. These chunks are
-    // "in use", because not yet added to any chunk manager.
-    _vsn.initialize();
-    _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
-    for (int i = 0; i < num_chunks; i ++) {
-      const size_t size = get_random_chunk_size();
-      _pool[i] = _vsn.get_chunk_vs(size);
-      assert(_pool[i] != NULL, "allocation failed");
-    }
-    assert_counters();
-    _cm.locked_verify();
-  }
-
-  // Test entry point.
-  // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
-  // Chunks are choosen randomly. Number of chunks to return or taken are choosen randomly, but affected
-  // by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly alternate between
-  // returning and taking, whereas a factor of 1.0 will take/return all chunks from/to the
-  // chunks manager, thereby emptying or filling it completely.
-  void do_test(float phase_length_factor) {
-    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
-    assert_counters();
-    // Execute n operations, and operation being the move of a single chunk to/from the chunk manager.
-    const int num_max_ops = num_chunks * 100;
-    int num_ops = num_max_ops;
-    const int average_phase_length = (int)(phase_length_factor * num_chunks);
-    int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
-    bool return_phase = true;
-    while (num_ops > 0) {
-      int chunks_moved = 0;
-      if (return_phase) {
-        // Randomly switch between returning a single chunk or a random length chunk list.
-        if (os::random() % 2 == 0) {
-          if (return_single_random_chunk_to_chunkmanager()) {
-            chunks_moved = 1;
-          }
-        } else {
-          const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
-          chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
-        }
-      } else {
-        // Breath out.
-        if (take_single_random_chunk_from_chunkmanager()) {
-          chunks_moved = 1;
-        }
-      }
-      num_ops -= chunks_moved;
-      num_ops_until_switch -= chunks_moved;
-      if (chunks_moved == 0 || num_ops_until_switch <= 0) {
-        return_phase = !return_phase;
-        num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
-      }
-    }
-  }
-};
-
-void* setup_chunkmanager_returntests() {
-  ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl();
-  return p;
-}
-
-void teardown_chunkmanager_returntests(void* p) {
-  delete (ChunkManagerReturnTestImpl*) p;
-}
-
-void run_chunkmanager_returntests(void* p, float phase_length) {
-  ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p;
-  test->do_test(phase_length);
-}
-
 // The following test is placed here instead of a gtest / unittest file
 // because the ChunkManager class is only available in this file.
 class SpaceManagerTest : AllStatic {
   friend void SpaceManager_test_adjust_initial_chunk_size();
 

@@ -4676,5 +5261,41 @@
 void SpaceManager_test_adjust_initial_chunk_size() {
   SpaceManagerTest::test_adjust_initial_chunk_size();
 }
 
 #endif // ASSERT
+
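+// The following functions are hooks for test code; they are placed here
+// (rather than in a gtest file) because the ChunkManager internals and the
+// chunk geometry constants are only visible in this file.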
+struct chunkmanager_statistics_t {
+  int num_specialized_chunks;
+  int num_small_chunks;
+  int num_medium_chunks;
+  int num_humongous_chunks;
+};
+
+extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
+  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
+  ChunkManager::ChunkManagerStatistics stat;
+  chunk_manager->get_statistics(&stat);
+  out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
+  out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
+  out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
+  out->num_humongous_chunks = (int)stat.num_humongous_chunks;
+}
+
+struct chunk_geometry_t {
+  size_t specialized_chunk_word_size;
+  size_t small_chunk_word_size;
+  size_t medium_chunk_word_size;
+};
+
+extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
+  if (mdType == Metaspace::NonClassType) {
+    out->specialized_chunk_word_size = SpecializedChunk;
+    out->small_chunk_word_size = SmallChunk;
+    out->medium_chunk_word_size = MediumChunk;
+  } else {
+    out->specialized_chunk_word_size = ClassSpecializedChunk;
+    out->small_chunk_word_size = ClassSmallChunk;
+    out->medium_chunk_word_size = ClassMediumChunk;
+  }
+}
+
< prev index next >