# HG changeset patch # User stuefe # Date 1513185105 -3600 # Wed Dec 13 18:11:45 2017 +0100 # Node ID 828b7c0af987fdad213d2f46be97f0b1a163df56 # Parent 589a6f1d86e96708f56c9826a32c7d0e39203133 [mq]: metaspace-coalesc-patch diff -r 589a6f1d86e9 -r 828b7c0af987 src/hotspot/share/memory/metachunk.cpp --- a/src/hotspot/share/memory/metachunk.cpp Sat Dec 09 07:50:50 2017 -0800 +++ b/src/hotspot/share/memory/metachunk.cpp Wed Dec 13 18:11:45 2017 +0100 @@ -48,16 +48,21 @@ // Metachunk methods -Metachunk::Metachunk(size_t word_size, +Metachunk::Metachunk(ChunkIndex chunktype, bool is_class, ChunkOrigin origin, size_t word_size, VirtualSpaceNode* container) : Metabase(word_size), + _chunk_type(chunktype), + _is_class(is_class), + _chunk_origin(origin), _top(NULL), _container(container) { _top = initial_top(); set_is_tagged_free(false); + _sentinel = CHUNK_SENTINEL; #ifdef ASSERT mangle(uninitMetaWordVal); + verify(); #endif } @@ -83,8 +88,9 @@ void Metachunk::print_on(outputStream* st) const { st->print_cr("Metachunk:" " bottom " PTR_FORMAT " top " PTR_FORMAT - " end " PTR_FORMAT " size " SIZE_FORMAT, - p2i(bottom()), p2i(_top), p2i(end()), word_size()); + " end " PTR_FORMAT " size " SIZE_FORMAT " (%s)", + p2i(bottom()), p2i(_top), p2i(end()), word_size(), + chunk_size_name(get_chunk_type())); if (Verbose) { st->print_cr(" used " SIZE_FORMAT " free " SIZE_FORMAT, used_word_size(), free_word_size()); @@ -102,13 +108,39 @@ #endif // PRODUCT void Metachunk::verify() { -#ifdef ASSERT - // Cannot walk through the blocks unless the blocks have - // headers with sizes. - assert(bottom() <= _top && - _top <= (MetaWord*)end(), - "Chunk has been smashed"); -#endif - return; + assert(is_valid_sentinel(), "Chunk " PTR_FORMAT ": sentinel invalid", p2i(this)); + const ChunkIndex chunk_type = get_chunk_type(); + assert(is_valid_chunktype(chunk_type), "Chunk " PTR_FORMAT ": Invalid chunk type.", p2i(this)); + if (chunk_type != HumongousIndex) { + assert(word_size() == get_size_for_nonhumongous_chunktype(chunk_type, is_class()), + "Chunk " PTR_FORMAT ": wordsize " SIZE_FORMAT " does not fit chunk type %s.", + p2i(this), word_size(), chunk_size_name(chunk_type)); + } + assert(is_valid_chunkorigin(get_chunk_origin()), "Chunk " PTR_FORMAT ": Invalid chunk origin.", p2i(this)); + assert(bottom() <= _top && _top <= (MetaWord*)end(), + "Chunk " PTR_FORMAT ": Chunk top out of chunk bounds", p2i(this)); + + // For non-humongous chunks, starting address shall be aligned to its chunk size. Humongous chunks + // start address is aligned to specialized chunk size. + const size_t required_alignment = + (chunk_type != HumongousIndex ? word_size() : get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class())) * sizeof(MetaWord); + assert(is_aligned((address)this, required_alignment), + "Chunk " PTR_FORMAT ": (size " SIZE_FORMAT ") not aligned to " SIZE_FORMAT ".", + p2i(this), word_size() * sizeof(MetaWord), required_alignment); } +// Helper, returns a descriptive name for the given index. 
+const char* chunk_size_name(ChunkIndex index) { + switch (index) { + case SpecializedIndex: + return "specialized"; + case SmallIndex: + return "small"; + case MediumIndex: + return "medium"; + case HumongousIndex: + return "humongous"; + default: + return "Invalid index"; + } +} diff -r 589a6f1d86e9 -r 828b7c0af987 src/hotspot/share/memory/metachunk.hpp --- a/src/hotspot/share/memory/metachunk.hpp Sat Dec 09 07:50:50 2017 -0800 +++ b/src/hotspot/share/memory/metachunk.hpp Wed Dec 13 18:11:45 2017 +0100 @@ -94,15 +94,83 @@ // | | | | // +--------------+ <- bottom --+ --+ +// ChunkIndex (todo: rename?) defines the type of chunk. Chunk types +// differ by size: specialized < small < medium, chunks larger than +// medium are humongous chunks of varying size. +enum ChunkIndex { + ZeroIndex = 0, + SpecializedIndex = ZeroIndex, + SmallIndex = SpecializedIndex + 1, + MediumIndex = SmallIndex + 1, + HumongousIndex = MediumIndex + 1, + NumberOfFreeLists = 3, + NumberOfInUseLists = 4 +}; + +// Utility functions. +size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunk_type, bool is_class); +ChunkIndex get_chunk_type_by_size(size_t size, bool is_class); + +// Returns a descriptive name for a chunk type. +const char* chunk_size_name(ChunkIndex index); + +// Verify chunk type +inline bool is_valid_chunktype(ChunkIndex index) { + return index == SpecializedIndex || index == SmallIndex || + index == MediumIndex || index == HumongousIndex; +} + +inline bool is_valid_nonhumongous_chunktype(ChunkIndex index) { + return is_valid_chunktype(index) && index != HumongousIndex; +} + +enum ChunkOrigin { + // Chunk normally born (via take_from_committed) + chunk_origin_normal = 1, + // Chunk was born as padding chunk + chunk_origin_pad = 2, + // Chunk was born as leftover chunk in VirtualSpaceNode::retire + chunk_origin_leftover = 3, + // Chunk was born as result of a coalescation of smaller chunks + chunk_origin_coalescation = 4, + // Chunk was born as result of a split of a larger chunk + chunk_origin_split = 5, + + chunk_origin_minimum = chunk_origin_normal, + chunk_origin_maximum = chunk_origin_split, + chunk_origins_count = chunk_origin_maximum + 1 +}; + +inline bool is_valid_chunkorigin(ChunkOrigin origin) { + return origin == chunk_origin_normal || + origin == chunk_origin_pad || + origin == chunk_origin_leftover || + origin == chunk_origin_coalescation || + origin == chunk_origin_split; +} + + class Metachunk : public Metabase { friend class MetachunkTest; // The VirtualSpaceNode containing this chunk. - VirtualSpaceNode* _container; + VirtualSpaceNode* const _container; // Current allocation top. MetaWord* _top; + // Keep additional information in a 64bit word, we loose as much + // by alignment. But keep bools and enums as uint8_t. + + // a 32bit sentinel for debugging purposes. +#define CHUNK_SENTINEL 0x4d4554EF // "MET" +#define CHUNK_SENTINEL_INVALID 0xFEEEEEEF + uint32_t _sentinel; + + // Whether the chunk is free (in freelist) or in use by some class loader. bool _is_tagged_free; + const bool _is_class; + const ChunkIndex _chunk_type; + const ChunkOrigin _chunk_origin; MetaWord* initial_top() const { return (MetaWord*)this + overhead(); } MetaWord* top() const { return _top; } @@ -111,7 +179,7 @@ // Metachunks are allocated out of a MetadataVirtualSpace and // and use some of its space to describe itself (plus alignment // considerations). Metadata is allocated in the rest of the chunk. 
   // This size is the overhead of maintaining the Metachunk within
   // the space.
 
   // Alignment of each allocation in the chunks.
@@ -120,7 +188,7 @@
   // Size of the Metachunk header, including alignment.
   static size_t overhead();
 
-  Metachunk(size_t word_size , VirtualSpaceNode* container);
+  Metachunk(ChunkIndex chunktype, bool is_class, ChunkOrigin origin, size_t word_size, VirtualSpaceNode* container);
 
   MetaWord* allocate(size_t word_size);
 
@@ -149,6 +217,17 @@
   void print_on(outputStream* st) const;
 
   void verify();
+
+  // returns true if chunk sentinel is valid
+  bool is_valid_sentinel() const { return _sentinel == CHUNK_SENTINEL; }
+  // wipes chunk sentinel
+  void remove_sentinel() { _sentinel = CHUNK_SENTINEL_INVALID; }
+
+  ChunkIndex get_chunk_type() const { return _chunk_type; }
+  ChunkOrigin get_chunk_origin() const { return _chunk_origin; }
+
+  bool is_class() const { return _is_class; }
+
 };
 
 // Metablock is the unit of allocation from a Chunk.
diff -r 589a6f1d86e9 -r 828b7c0af987 src/hotspot/share/memory/metaspace.cpp
--- a/src/hotspot/share/memory/metaspace.cpp  Sat Dec 09 07:50:50 2017 -0800
+++ b/src/hotspot/share/memory/metaspace.cpp  Wed Dec 13 18:11:45 2017 +0100
@@ -56,7 +56,14 @@
 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
 
 // Set this constant to enable slow integrity checking of the free chunk lists
-const bool metaspace_slow_verify = false;
+const bool metaspace_slow_verify = true;
+
+// Helper function does a bunch of checks for a chunk.
+static void do_verify_chunk(Metachunk* chunk);
+
+// Helper function, given a Metachunk, update its in-use information
+// (both the sentinel and the occupancy map)
+static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
 
 size_t const allocation_from_dictionary_limit = 4 * K;
 
@@ -67,33 +74,6 @@
 
 DEBUG_ONLY(bool Metaspace::_frozen = false;)
 
-// Used in declarations in SpaceManager and ChunkManager
-enum ChunkIndex {
-  ZeroIndex = 0,
-  SpecializedIndex = ZeroIndex,
-  SmallIndex = SpecializedIndex + 1,
-  MediumIndex = SmallIndex + 1,
-  HumongousIndex = MediumIndex + 1,
-  NumberOfFreeLists = 3,
-  NumberOfInUseLists = 4
-};
-
-// Helper, returns a descriptive name for the given index.
-static const char* chunk_size_name(ChunkIndex index) {
-  switch (index) {
-    case SpecializedIndex:
-      return "specialized";
-    case SmallIndex:
-      return "small";
-    case MediumIndex:
-      return "medium";
-    case HumongousIndex:
-      return "humongous";
-    default:
-      return "Invalid index";
-  }
-}
-
 enum ChunkSizes {    // in words.
   ClassSpecializedChunk = 128,
   SpecializedChunk = 128,
@@ -103,6 +83,47 @@
   MediumChunk = 8 * K
 };
 
+// Returns size of this chunk type
+size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
+  assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
+  switch(chunktype) {
+  case SpecializedIndex: return is_class ? ClassSpecializedChunk : SpecializedChunk;
+  case SmallIndex: return is_class ? ClassSmallChunk : SmallChunk;
+  case MediumIndex: return is_class ?
ClassMediumChunk : MediumChunk; + } + ShouldNotReachHere(); + return 0; +} + +ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) { + if (is_class) { + if (size == ClassSpecializedChunk) { + return SpecializedIndex; + } else if (size == ClassSmallChunk) { + return SmallIndex; + } else if (size == ClassMediumChunk) { + return MediumIndex; + } else if (size > ClassMediumChunk) { + assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size"); + return HumongousIndex; + } + } else { + if (size == SpecializedChunk) { + return SpecializedIndex; + } else if (size == SmallChunk) { + return SmallIndex; + } else if (size == MediumChunk) { + return MediumIndex; + } else if (size > MediumChunk) { + assert(is_aligned(size, SpecializedChunk), "Invalid chunk size"); + return HumongousIndex; + } + } + ShouldNotReachHere(); + return (ChunkIndex)-1; +} + + static ChunkIndex next_chunk_index(ChunkIndex i) { assert(i < NumberOfInUseLists, "Out of bound"); return (ChunkIndex) (i+1); @@ -120,6 +141,19 @@ } } +// tracing for coalescation things +static void trace_coalescation_info(const char* msg, ...) { + LogTarget(Info, gc, metaspace, freelist) lt; + if (lt.is_enabled()) { + char buf[1024]; + va_list ap; + va_start(ap, msg); + jio_vsnprintf(buf, sizeof(buf), msg, ap); + va_end(ap); + lt.print("[%u] %s\n", (unsigned) os::current_thread_id(), buf); + } +} + volatile intptr_t MetaspaceGC::_capacity_until_GC = 0; uint MetaspaceGC::_shrink_factor = 0; bool MetaspaceGC::_should_concurrent_collect = false; @@ -136,6 +170,9 @@ // MediumChunk ChunkList _free_chunks[NumberOfFreeLists]; + // Whether or not this is the class chunkmanager. + const bool _is_class; + // Return non-humongous chunk list by its index. ChunkList* free_chunks(ChunkIndex index); @@ -178,6 +215,22 @@ } void verify_free_chunks_count(); + // Given a pointer to a chunk, attempts to coalesce it with the neighboring chunks to + // a bigger chunk type. Coalescation works if the neighboring chunks also happen to be free. + // Valid combinations are: a specialized chunk can be merged with neighbors to form a small or + // medium chunk; a small chunk can be merged with neighbors to form a medium chunk. + // Returns true if successful. + bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type); + + // Helper for chunk coalescation: + // Given an address range with 1-n chunks which are all supposed to be free and currently + // managed by this ChunkManager, remove them from this ChunkManager and mark them as invalid. + // - This does not correct the occupancy map. + // - This does not adjust the counters in ChunkManager. + // - Does not adjust container count counter in containing VirtualSpaceNode + // Returns number of chunks removed. 
+ int remove_chunks_in_area(MetaWord* p, size_t word_size); + struct ChunkManagerStatistics { size_t num_by_type[NumberOfFreeLists]; size_t single_size_by_type[NumberOfFreeLists]; @@ -192,11 +245,12 @@ public: - ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size) - : _free_chunks_total(0), _free_chunks_count(0) { - _free_chunks[SpecializedIndex].set_size(specialized_size); - _free_chunks[SmallIndex].set_size(small_size); - _free_chunks[MediumIndex].set_size(medium_size); + ChunkManager(bool is_class) + : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) + { + _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class)); + _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class)); + _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class)); } // add or delete (return) a chunk to the global freelist. @@ -209,6 +263,13 @@ // Map a given index to the chunk size. size_t size_by_index(ChunkIndex index) const; + bool is_class() const { return _is_class; } + + // Convenience accessors. + size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); } + size_t small_chunk_word_size() const { return size_by_index(SmallIndex); } + size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); } + // Take a chunk from the ChunkManager. The chunk is expected to be in // the chunk manager (the freelist if non-humongous, the dictionary if // humongous). @@ -391,6 +452,282 @@ void print_on(outputStream* st) const; }; +// helper for Occupancy Bitmap. A type trait to give an all-bits-are-one-unsigned constant. +template struct all_ones { static const T value; }; +template <> struct all_ones { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; }; +template <> struct all_ones { static const uint32_t value = 0xFFFFFFFF; }; + +// The OccupancyMap is a bitmap which, for a given VirtualSpaceNode, keeps information +// about +// - where a chunk starts +// - whether a chunk is in-use or free +// The Bitmap is organized as two-bits per smallest chunk size. This is the specialized +// chunk size (currently 128 MetaWords), and all other chunk sizes are multiples of this +// size. +class OccupancyMap : public CHeapObj { + + // the address range this map covers + const MetaWord* const _reference_address; + const size_t _word_size; + + // the word size of a specialized chunk, aka the number of words one bit in this map + // represents + const size_t _smallest_chunk_word_size; + + // map data + // Data are organized in two bit layers: + // the first layer is the chunk-start-map. Here, a bit is set to mark the corresponding + // specialized-chunk-sized region as the head of a chunk. + // the second layer is the in-use-map. Here, a set bit indicates that the corresponding + // specialized-chunk-sized region is in use. 
+ uint8_t* _map[2]; + + enum { layer_chunk_start_map = 0, layer_in_use_map = 1 }; + + // length, in bytes, of bitmap data + size_t _map_size; + + // returns true if bit at position pos at bit-layer layer is set + bool get_bit_at_position(unsigned pos, unsigned layer) const { + assert(layer == 0 || layer == 1, "Invalid layer %d", layer); + const unsigned byteoffset = pos / 8; + assert(byteoffset < _map_size, + "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size); + const unsigned mask = 1 << (pos % 8); + return _map[layer][byteoffset] & mask; + } + + // changes bit at position pos at bit-layer layer to value v + void set_bit_at_position(unsigned pos, unsigned layer, bool v) { + assert(layer == 0 || layer == 1, "Invalid layer %d", layer); + const unsigned byteoffset = pos / 8; + assert(byteoffset < _map_size, + "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size); + const unsigned mask = 1 << (pos % 8); + if (v) { + _map[layer][byteoffset] |= mask; + } else { + _map[layer][byteoffset] &= ~mask; + } + } + + // optimized case of is_any_bit_set_in_region for 32/64bit aligned access: pos is 32/64 aligned and num_bits is 32/64. + // This is the typical case when coalescing to medium chunks, whose size is 32 or 64 times the specialized + // chunk size (depending on class or non class case), so they occupy 64 bits which should be 64bit aligned, + // because chunks are chunk-size aligned. + template + bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const { + assert(_map_size > 0, "not initialized"); + assert(layer == 0 || layer == 1, "Invalid layer %d", layer); + assert(pos % (sizeof(T) * 8) == 0, "bit position must be aligned (%u)", pos); + assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u)", num_bits); + const size_t byteoffset = pos / 8; + assert(byteoffset <= (_map_size - sizeof(T)), + "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size); + const T w = *(T*)(_map[layer] + byteoffset); + return w > 0 ? true : false; + } + + // returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer. + bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const { + if (pos % 32 == 0 && num_bits == 32) { + return is_any_bit_set_in_region_3264(pos, num_bits, layer); + } else if (pos % 64 == 0 && num_bits == 64) { + return is_any_bit_set_in_region_3264(pos, num_bits, layer); + } else { + for (unsigned n = 0; n < num_bits; n ++) { + if (get_bit_at_position(pos + n, layer)) { + return true; + } + } + } + return false; + } + + // returns true if any bit in region [p, p+word_size) is set in bit-layer layer. + bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const { + assert(word_size % _smallest_chunk_word_size == 0, + "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size); + const unsigned pos = get_bitpos_for_address(p); + const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size); + return is_any_bit_set_in_region(pos, num_bits, layer); + } + + // optimized case of set_bits_of_region for 32/64bit aligned access: pos is 32/64 aligned and num_bits is 32/64. + // This is the typical case when coalescing to medium chunks, whose size is 32 or 64 times the specialized + // chunk size (depending on class or non class case), so they occupy 64 bits which should be 64bit aligned, + // because chunks are chunk-size aligned. 
+ template + void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) { + assert(pos % (sizeof(T) * 8) == 0, "bit position must be aligned to %u (%u)", + (unsigned)(sizeof(T) * 8), pos); + assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u", + num_bits, (unsigned)(sizeof(T) * 8)); + const size_t byteoffset = pos / 8; + assert(byteoffset <= (_map_size - sizeof(T)), + "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size); + T* const pw = (T*)(_map[layer] + byteoffset); + *pw = v ? all_ones::value : (T) 0; + } + + // set all bits in a region starting at pos to a value + void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) { + assert(_map_size > 0, "not initialized"); + assert(layer == 0 || layer == 1, "Invalid layer %d", layer); + if (pos % 32 == 0 && num_bits == 32) { + set_bits_of_region_T(pos, num_bits, layer, v); + } else if (pos % 64 == 0 && num_bits == 64) { + set_bits_of_region_T(pos, num_bits, layer, v); + } else { + for (unsigned n = 0; n < num_bits; n ++) { + set_bit_at_position(pos + n, layer, v); + } + } + } + + // Helper: sets all bits in a region [p, p+word_size) + void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) { + assert(word_size % _smallest_chunk_word_size == 0, + "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size); + const unsigned pos = get_bitpos_for_address(p); + const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size); + set_bits_of_region(pos, num_bits, layer, v); + } + + // helper: given an address, return the bit position representing that address + unsigned get_bitpos_for_address(const MetaWord* p) const { + assert(_reference_address != NULL, "not initialized"); + assert(p >= _reference_address && p < _reference_address + _word_size, + "Address %p out of range for occupancy map [%p..%p)", + p, _reference_address, _reference_address + _word_size); + assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)), + "Address not aligned (%p)", p); + const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size; + assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity."); + return (unsigned) d; + } + +public: + + OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) +: _reference_address(reference_address), _word_size(word_size), _smallest_chunk_word_size(smallest_chunk_word_size) +{ + assert(reference_address != NULL, "invalid reference address"); + assert(is_aligned(reference_address, smallest_chunk_word_size), + "reference address not aligned to smallest chunk size"); + assert(is_aligned(word_size, smallest_chunk_word_size), + "word_size shall be a multiple of the smallest chunk size"); + // calc bitmap size: one bit per smallest_chunk_word_size'd area + size_t num_bits = word_size / smallest_chunk_word_size; + _map_size = (num_bits + 7) / 8; + assert(_map_size * 8 >= num_bits, "sanity"); + _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal); + _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal); + assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed"); + memset(_map[1], 0, _map_size); + memset(_map[0], 0, _map_size); + // Sanity test: the first respectively last possible chunk start address in the covered range shall map to the + // first and last bit in the bitmap. 
+ assert(get_bitpos_for_address(reference_address) == 0, + "First chunk address in range must map to fist bit in bitmap."); + assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1, + "Last chunk address in range must map to last bit in bitmap."); +} + + ~OccupancyMap() { + os::free(_map[0]); + os::free(_map[1]); + } + + // returns true if at address x a chunk is starting + bool chunk_starts_at_address(MetaWord* p) const { + const unsigned pos = get_bitpos_for_address(p); + return get_bit_at_position(pos, layer_chunk_start_map); + } + + void set_chunk_starts_at_address(MetaWord* p, bool v) { + const unsigned pos = get_bitpos_for_address(p); + set_bit_at_position(pos, layer_chunk_start_map, v); + } + + // removes all chunk-start-bits inside a region, typically as a result of a coalescation. + void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) { + set_bits_of_region(p, word_size, layer_chunk_start_map, false); + } + + // returns true if there are life (in use) chunks in the region limited by [p, p+word_size). + bool is_region_in_use(MetaWord* p, size_t word_size) const { + return is_any_bit_set_in_region(p, word_size, layer_in_use_map); + } + + // marks the region starting at p with the size word_size as in use or free, depending on v. + void set_region_in_use(MetaWord* p, size_t word_size, bool v) { + set_bits_of_region(p, word_size, layer_in_use_map, v); + } + + // verify occupancy map for the address range [from, to). + // We need to tell it the address range, because the memory the + // occupancy map is covering may not be fully comitted yet. + void verify(MetaWord* from, MetaWord* to) { + Metachunk* chunk = NULL; + int nth_bit_for_chunk = 0; + MetaWord* chunk_end = NULL; + for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) { + const unsigned pos = get_bitpos_for_address(p); + // check the chunk-starts-info: + if (get_bit_at_position(pos, layer_chunk_start_map)) { + // chunk start marked in bitmap + chunk = (Metachunk*) p; + if (chunk_end != NULL) { + guarantee(chunk_end == p, "Unexpected chunk start found at %p (expected the next chunk to start at %p).", + p, chunk_end); + } + guarantee(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p); + if (chunk->get_chunk_type() != HumongousIndex) { + guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p); + } + chunk_end = p + chunk->word_size(); + nth_bit_for_chunk = 0; + guarantee(chunk_end <= to, "Chunk end overlaps test address range."); + } else { + // No chunk start marked in bitmap + guarantee(chunk != NULL, "Chunk should start at start of address range."); + guarantee(p < chunk_end, "Did not find expected chunk start at %p", p); + nth_bit_for_chunk ++; + } + // check the in-use-info: + const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map); + if (in_use_bit) { + guarantee(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u)", + chunk, nth_bit_for_chunk); + } else { + guarantee(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u)", + chunk, nth_bit_for_chunk); + } + } + } + + // Verify that a given chunk is correctly accounted for in the bitmap. + void verify_for_chunk(Metachunk* chunk) { + guarantee(chunk_starts_at_address((MetaWord*) chunk), + "No chunk start marked in map for chunk %p.", chunk); + // For chunks larger than the minimal chunk size, no other chunk must start in its area. 
+ if (chunk->word_size() > _smallest_chunk_word_size) { + guarantee(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size, + chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map), + "No chunk must start within another chunk."); + } + if (!chunk->is_tagged_free()) { + guarantee(is_region_in_use((MetaWord*)chunk, chunk->word_size()), + "Chunk %p is in use but marked as free in map.", chunk); + } else { + guarantee(!is_region_in_use((MetaWord*)chunk, chunk->word_size()), + "Chunk %p is free but marked as in-use in map.", chunk); + } + } + +}; // end: OccupancyMap + // A VirtualSpaceList node. class VirtualSpaceNode : public CHeapObj { friend class VirtualSpaceList; @@ -398,6 +735,9 @@ // Link to next VirtualSpaceNode VirtualSpaceNode* _next; + // Whether this node is contained in class or metaspace. + const bool _is_class; + // total in the VirtualSpace MemRegion _reserved; ReservedSpace _rs; @@ -406,6 +746,8 @@ // count of chunks contained in this VirtualSpace uintx _container_count; + OccupancyMap* _occupancy_map; + // Convenience functions to access the _virtual_space char* low() const { return virtual_space()->low(); } char* high() const { return virtual_space()->high(); } @@ -416,16 +758,24 @@ // Committed but unused space in the virtual space size_t free_words_in_vs() const; + + // true if this node belongs to class metaspace- + bool is_class() const { return _is_class; } + public: - VirtualSpaceNode(size_t byte_size); - VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {} + VirtualSpaceNode(bool is_class, size_t byte_size); + VirtualSpaceNode(bool is_class, ReservedSpace rs) : + _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {} ~VirtualSpaceNode(); // Convenience functions for logical bottom and end MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); } MetaWord* end() const { return (MetaWord*) _virtual_space.high(); } + const OccupancyMap* occupancy_map() const { return _occupancy_map; } + OccupancyMap* occupancy_map() { return _occupancy_map; } + bool contains(const void* ptr) { return ptr >= low() && ptr < high(); } size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; } @@ -493,6 +843,10 @@ void print_on(outputStream* st) const; void print_map(outputStream* st, bool is_class) const; + + // Verify all chunks in this node. + void verify(); + }; #define assert_is_aligned(value, alignment) \ @@ -515,7 +869,7 @@ } // byte_size is the size of the associated virtualspace. 
-VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) { +VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) : _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) { assert_is_aligned(bytes, Metaspace::reserve_alignment()); bool large_pages = should_commit_large_pages_when_reserving(bytes); _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); @@ -531,12 +885,16 @@ } void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { +#ifdef ASSERT + this->verify(); +#endif Metachunk* chunk = first_chunk(); Metachunk* invalid_chunk = (Metachunk*) top(); while (chunk < invalid_chunk ) { assert(chunk->is_tagged_free(), "Should be tagged free"); MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); chunk_manager->remove_chunk(chunk); + chunk->remove_sentinel(); assert(chunk->next() == NULL && chunk->prev() == NULL, "Was not removed from its list"); @@ -639,6 +997,7 @@ Metachunk* invalid_chunk = (Metachunk*) top(); while (chunk < invalid_chunk ) { MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); + do_verify_chunk(chunk); // Don't count the chunks on the free lists. Those are // still part of the VirtualSpaceNode but not currently // counted. @@ -651,6 +1010,19 @@ } #endif +// Verify all chunks in this list node. +void VirtualSpaceNode::verify() { + Metachunk* chunk = first_chunk(); + Metachunk* invalid_chunk = (Metachunk*) top(); + while (chunk < invalid_chunk ) { + MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); + do_verify_chunk(chunk); + chunk = (Metachunk*) next; + } + // Also verify the whole occupancy map + occupancy_map()->verify(this->bottom(), this->top()); +} + // List of VirtualSpaces for metadata allocation. class VirtualSpaceList : public CHeapObj { friend class VirtualSpaceNode; @@ -1078,6 +1450,9 @@ VirtualSpaceNode::~VirtualSpaceNode() { _rs.release(); + if (_occupancy_map != NULL) { + delete _occupancy_map; + } #ifdef ASSERT size_t word_size = sizeof(*this) / BytesPerWord; Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1); @@ -1101,6 +1476,95 @@ // This interface is also used internally for debugging. Not all // chunks removed here are necessarily used for allocation. Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) { + // Note: Non-humongous chunks are to be allocated aligned to their chunk size. So, + // Medium chunks are aligned to medium chunk size boundaries, small chunks to + // small chunk boundaries, and so forth. This is to facilitate free chunk coalescation + // and to reduce fragmentation. + // Chunk sizes are spec < small < medium, with both small and medium being common + // multiples of spec size. So the smallest grain chunk border is specialized chunk + // size. + // Because of this alignment, me may need to create a number of padding chunks. + // These chunks are created and added to the freelist. + + // The chunk manager to which we will give our padding chunks. 
+ ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class()); + + // shorthands + const size_t spec_word_size = chunk_manager->specialized_chunk_word_size(); + const size_t small_word_size = chunk_manager->small_chunk_word_size(); + const size_t med_word_size = chunk_manager->medium_chunk_word_size(); + + assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size || + chunk_word_size >= med_word_size, "Invalid chunk size requested."); + + // Chunk alignment (in bytes) == chunk size unless humongous. Humongous chunks are aligned to the smallest + // chunk size (spec). + const size_t required_chunk_alignment = (chunk_word_size > med_word_size ? spec_word_size : chunk_word_size) * sizeof(MetaWord); + + // Do we have enough space to create the requested chunk plus any padding chunks needed? + MetaWord* const next_aligned = + static_cast(align_up(top(), required_chunk_alignment)); + if (!is_available((next_aligned - top()) + chunk_word_size)) { + return NULL; + } + + // Before allocating the requested chunk, allocate padding chunks if necessary. + // We only need to do this for small or medium chunks: specialized chunks are the + // smallest size, hence always aligned. Homungous chunks are allocated unaligned + // (implicitly, also aligned to smallest chunk size). + if (chunk_word_size == med_word_size || chunk_word_size == small_word_size) { + int num_small_padding_chunks_created = 0; + int num_specialized_padding_chunks_created = 0; + + if (next_aligned > top()) { + trace_coalescation_info("Coalescation: creating padding chunks between %p and %p...", + top(), next_aligned); + } + + // Allocate padding chunks + while (next_aligned > top()) { + size_t padding_chunk_word_size = small_word_size; + if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) { + assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true. + padding_chunk_word_size = spec_word_size; + } + MetaWord* here = top(); + assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord)); + inc_top(padding_chunk_word_size); + + // create new padding chunk + ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class()); + if (padding_chunk_type == SmallIndex) { + num_small_padding_chunks_created ++; + } else if (padding_chunk_type == SpecializedIndex) { + num_specialized_padding_chunks_created ++; + } else ShouldNotReachHere(); + + Metachunk* const padding_chunk = + ::new (here) Metachunk(padding_chunk_type, is_class(), chunk_origin_pad, padding_chunk_word_size, this); + trace_coalescation_info("Coalescation: created padding chunk at " PTR_FORMAT ", size " SIZE_FORMAT_HEX ".", + p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord)); + + // mark chunk start in occupancy map + occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true); + + // Chunks are born as in-use (see MetaChunk ctor). So, before returning the padding chunk to its chunk manager, + // mark it as in use (ChunkManager will assert that). + do_update_in_use_info_for_chunk(padding_chunk, true); + + // return Chunk to freelist. + chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk); + +#ifdef ASSERT + chunk_manager->locked_verify(); + do_verify_chunk(padding_chunk); +#endif + } + } // End: create padding chunks if necessary. + + // Now, top should be aligned correctly. 
+ assert_is_aligned(top(), required_chunk_alignment); + // Bottom of the new chunk MetaWord* chunk_limit = top(); assert(chunk_limit != NULL, "Not safe to call this method"); @@ -1126,7 +1590,13 @@ inc_top(chunk_word_size); // Initialize the chunk - Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this); + ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class()); + Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_origin_normal, chunk_word_size, this); + occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true); + do_update_in_use_info_for_chunk(result, true); + + DEBUG_ONLY(do_verify_chunk(result)); + return result; } @@ -1195,6 +1665,10 @@ _rs.size() / BytesPerWord); } + // Initialize Occupancy Map. + const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk; + _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size); + return result; } @@ -1279,6 +1753,128 @@ account_for_removed_chunk(chunk); } +bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) { + assert_lock_strong(SpaceManager::expand_lock()); + assert(chunk != NULL, "invalid chunk pointer"); + // Check for valid coalescation combinations. + assert((chunk->get_chunk_type() == SpecializedIndex && (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) || + (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex), + "Invalid chunk coalescation combination."); + + const size_t target_chunk_word_size = get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class()); + + MetaWord* const p_coalescation_start = + (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord)); + MetaWord* const p_coalescation_end = + p_coalescation_start + target_chunk_word_size; + + // we need the VirtualSpaceNode containing this chunk and its occupancy map. + VirtualSpaceNode* const vsn = chunk->container(); + OccupancyMap* const ocmap = vsn->occupancy_map(); + + // The potential coalescation range shall be completely inside + // the committed range of the virtual space node. + if (p_coalescation_start < vsn->bottom() || p_coalescation_end > vsn->top()) { + return false; + } + + // Only attempt to coalesce if at the start of the potential coalescation range a chunk starts and + // at the end of the potential coalescation range a chunk ends. If that is not the case - so, + // if a chunk straddles either start or end of the coalescation range, we cannot coalesce. + // Note that this should only happen with humongous chunks. + if (!ocmap->chunk_starts_at_address(p_coalescation_start)) { + return false; + } + + // (a chunk ends at the coalescation range end either if this is the end of the used area or + // if a new chunk starts right away). + if (p_coalescation_end < vsn->top()) { + if (!ocmap->chunk_starts_at_address(p_coalescation_end)) { + return false; + } + } + + // Now check if in the coalescation area there are still life chunks. + if (ocmap->is_region_in_use(p_coalescation_start, target_chunk_word_size)) { + return false; + } + + // Success! Remove all chunks in this region. + trace_coalescation_info("Coalescation: coalescing chunks in area [%p-%p)...", p_coalescation_start, p_coalescation_end); + + const int num_chunks_removed = + remove_chunks_in_area(p_coalescation_start, target_chunk_word_size); + + // and create a single new bigger chunk. 
+ Metachunk* const p_new_chunk = + ::new (p_coalescation_start) Metachunk(target_chunk_type, is_class(), chunk_origin_coalescation, target_chunk_word_size, vsn); + + trace_coalescation_info("Coalescation: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".", + p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord)); + + // fix occupancy map: remove old start bits of the small chunks and set new start bit + ocmap->wipe_chunk_start_bits_in_region(p_coalescation_start, target_chunk_word_size); + ocmap->set_chunk_starts_at_address(p_coalescation_start, true); + + // Mark chunk as free. Note: it is not necessary to update the occupancy map in-use map, because + // the old chunks were also free, so nothing should have changed. + p_new_chunk->set_is_tagged_free(true); + + // add new chunk to its freelist + ChunkList* const list = free_chunks(target_chunk_type); + list->return_chunk_at_head(p_new_chunk); + + // And adjust ChunkManager:: _free_chunks_count (_free_chunks_total should not have changed, because + // the size of the space should be the same) + _free_chunks_count -= num_chunks_removed; + _free_chunks_count ++; + + // VirtualSpaceNode::container_count does not have to be modified: it means "number of active (non-free) + // chunks", so coalescation of free chunks should not affect that count. + + // At the end of a coalescation, run verification tests +#ifdef ASSERT + vsn->verify(); +#endif + + return true; + +} + +// remove all chunks in the given area - the chunks are supposed to be free - +// from their corresponding freelists. Mark them as invalid. +// - This does not correct the occupancy map. +// - This does not adjust the counters in ChunkManager. +// - Does not adjust container count counter in containing VirtualSpaceNode +// returns number of chunks removed. +int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) { + assert(p != NULL && word_size > 0, "Invalid range."); + const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class()); + assert_is_aligned(word_size, smallest_chunk_size); + + Metachunk* const start = (Metachunk*) p; + const Metachunk* const end = (Metachunk*)(p + word_size); + Metachunk* cur = start; + int num_removed = 0; + while (cur < end) { + Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size()); + DEBUG_ONLY(do_verify_chunk(cur)); + assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur); + assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur); + trace_coalescation_info("Coalescation: removing chunk %p, size " SIZE_FORMAT_HEX ".", + cur, cur->word_size() * sizeof(MetaWord)); + cur->remove_sentinel(); + // Note: cannot call ChunkManager::remove_chunk, because that modifies the counters in ChunkManager, + // which we do not want. So we call remove_chunk on the freelist directly (see also the splitting + // function which does the same) + ChunkList* const list = free_chunks(list_index(cur->word_size())); + list->remove_chunk(cur); + num_removed ++; + cur = next; + } + return num_removed; +} + // Walk the list of VirtualSpaceNodes and delete // nodes with a 0 container_count. Remove Metachunks in // the node from their respective freelists. 
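The eligibility test in ChunkManager::attempt_to_coalesce_around_chunk() above reduces to three checks against the occupancy map: a chunk must start exactly at the aligned-down range start, the range must end at a chunk boundary (or at top), and no part of the range may still be in use. A minimal stand-alone sketch of that check, with illustrative names (ToyMap, can_coalesce) rather than HotSpot types, and with indices counted in smallest-chunk units:

#include <cstddef>
#include <vector>

// One bit per smallest (specialized) chunk sized slot: does a chunk start here,
// and is the slot in use?
struct ToyMap {
  std::vector<bool> chunk_start;
  std::vector<bool> in_use;
  std::size_t top;   // one past the last slot of the used (committed) area
};

// p and target_len are in units of the smallest chunk size.
static bool can_coalesce(const ToyMap& map, std::size_t p, std::size_t target_len) {
  const std::size_t start = p - (p % target_len);   // align down to the target chunk size
  const std::size_t end   = start + target_len;
  if (end > map.top) {                              // range must lie inside the used area
    return false;
  }
  if (!map.chunk_start[start]) {                    // a chunk must begin exactly at the range start
    return false;
  }
  if (end < map.top && !map.chunk_start[end]) {     // and another chunk (or top) must follow the range
    return false;
  }
  for (std::size_t i = start; i < end; i++) {       // no live chunk anywhere inside the range
    if (map.in_use[i]) {
      return false;
    }
  }
  return true;
}

int main() {
  // Example: a free small chunk (4 slots) at slot 0, four free specialized chunks at slots 4..7.
  ToyMap m{ {true, false, false, false, true, true, true, true},
            {false, false, false, false, false, false, false, false}, 8 };
  return can_coalesce(m, 5, 8) ? 0 : 1;   // the whole 8-slot range can be merged
}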
@@ -1358,6 +1954,7 @@ void VirtualSpaceNode::retire(ChunkManager* chunk_manager) { DEBUG_ONLY(verify_container_count();) + assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?"); for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) { ChunkIndex index = (ChunkIndex)i; size_t chunk_size = chunk_manager->size_by_index(index); @@ -1394,7 +1991,7 @@ _virtual_space_count(0) { MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); - VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); + VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs); bool succeeded = class_entry->initialize(); if (succeeded) { link_vs(class_entry); @@ -1426,7 +2023,7 @@ assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment()); // Allocate the meta virtual space and initialize it. - VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size); + VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size); if (!new_entry->initialize()) { delete new_entry; return false; @@ -2116,11 +2713,24 @@ chunk_size_name(index), p2i(chunk), chunk->word_size()); } chunk->container()->dec_container_count(); - chunk->set_is_tagged_free(true); + do_update_in_use_info_for_chunk(chunk, false); // Chunk has been added; update counters. account_for_added_chunk(chunk); + // Attempt coalesce returned chunks with its neighboring chunks: + // if this chunk is small or special, attempt to coalesce to a medium chunk + if (index == SmallIndex || index == SpecializedIndex) { + if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) { + // This did not work. But if this chunk is special, we still may form a small chunk? + if (index == SpecializedIndex) { + if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) { + // give up. + } + } + } + } + } void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) { @@ -2589,6 +3199,11 @@ MutexLockerEx fcl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); + assert(sum_count_in_chunks_in_use() == allocated_chunks_count(), + "sum_count_in_chunks_in_use() " SIZE_FORMAT + " allocated_chunks_count() " SIZE_FORMAT, + sum_count_in_chunks_in_use(), allocated_chunks_count()); + chunk_manager()->slow_locked_verify(); dec_total_from_size_metrics(); @@ -2808,8 +3423,7 @@ for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { Metachunk* curr = chunks_in_use(i); while (curr != NULL) { - curr->verify(); - verify_chunk_size(curr); + do_verify_chunk(curr); curr = curr->next(); } } @@ -3650,7 +4264,7 @@ SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize); assert(using_class_space(), "Must be using class space"); _class_space_list = new VirtualSpaceList(rs); - _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk); + _chunk_manager_class = new ChunkManager(true); if (!_class_space_list->initialization_succeeded()) { vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); @@ -3757,7 +4371,7 @@ // Initialize the list of virtual spaces. 
_space_list = new VirtualSpaceList(word_size); - _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); + _chunk_manager_metadata = new ChunkManager(false); if (!_space_list->initialization_succeeded()) { vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL); @@ -4100,6 +4714,22 @@ } } +static void do_verify_chunk(Metachunk* chunk) { + guarantee(chunk != NULL, "Sanity"); + // Verify chunk itself; then verify that it is consistent with the occupany map + // of its containing node. + chunk->verify(); + VirtualSpaceNode* const vsn = chunk->container(); + OccupancyMap* const ocmap = vsn->occupancy_map(); + ocmap->verify_for_chunk(chunk); +} + +static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) { + chunk->set_is_tagged_free(!inuse); + OccupancyMap* const ocmap = chunk->container()->occupancy_map(); + ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse); +} + /////////////// Unit tests /////////////// #ifndef PRODUCT @@ -4190,16 +4820,16 @@ STATIC_ASSERT(SmallChunk % SpecializedChunk == 0); { // No committed memory in VSN - ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); - VirtualSpaceNode vsn(vsn_test_size_bytes); + ChunkManager cm(false); + VirtualSpaceNode vsn(false, vsn_test_size_bytes); vsn.initialize(); vsn.retire(&cm); assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN"); } { // All of VSN is committed, half is used by chunks - ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); - VirtualSpaceNode vsn(vsn_test_size_bytes); + ChunkManager cm(false); + VirtualSpaceNode vsn(false, vsn_test_size_bytes); vsn.initialize(); vsn.expand_by(vsn_test_size_words, vsn_test_size_words); vsn.get_chunk_vs(MediumChunk); @@ -4213,8 +4843,8 @@ // This doesn't work for systems with vm_page_size >= 16K. if (page_chunks < MediumChunk) { // 4 pages of VSN is committed, some is used by chunks - ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); - VirtualSpaceNode vsn(vsn_test_size_bytes); + ChunkManager cm(false); + VirtualSpaceNode vsn(false, vsn_test_size_bytes); vsn.initialize(); vsn.expand_by(page_chunks, page_chunks); @@ -4234,8 +4864,8 @@ } { // Half of VSN is committed, a humongous chunk is used - ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); - VirtualSpaceNode vsn(vsn_test_size_bytes); + ChunkManager cm(false); + VirtualSpaceNode vsn(false, vsn_test_size_bytes); vsn.initialize(); vsn.expand_by(MediumChunk * 2, MediumChunk * 2); vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk @@ -4266,7 +4896,7 @@ static void test_is_available_positive() { // Reserve some memory. - VirtualSpaceNode vsn(os::vm_allocation_granularity()); + VirtualSpaceNode vsn(false, os::vm_allocation_granularity()); assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); // Commit some memory. @@ -4284,7 +4914,7 @@ static void test_is_available_negative() { // Reserve some memory. - VirtualSpaceNode vsn(os::vm_allocation_granularity()); + VirtualSpaceNode vsn(false, os::vm_allocation_granularity()); assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); // Commit some memory. @@ -4299,7 +4929,7 @@ static void test_is_available_overflow() { // Reserve some memory. - VirtualSpaceNode vsn(os::vm_allocation_granularity()); + VirtualSpaceNode vsn(false, os::vm_allocation_granularity()); assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); // Commit some memory. 
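The cascade added to ChunkManager::return_single_chunk() above tries the largest merge first: a returned small or specialized chunk attempts to form a medium chunk, and only a specialized chunk that failed falls back to attempting a small chunk. A minimal stand-alone model of that control flow; toy_return_chunk and toy_attempt_coalesce are placeholder stubs, not the functions from this patch:

#include <cstdio>

enum ToyIndex { ToySpecialized, ToySmall, ToyMedium };

// Stand-in for attempt_to_coalesce_around_chunk(); here it only reports the attempt and fails.
static bool toy_attempt_coalesce(ToyIndex target) {
  std::printf("attempting to coalesce up to a %s chunk\n",
              target == ToyMedium ? "medium" : "small");
  return false;
}

// Mirrors the order of attempts in return_single_chunk(): largest merge first,
// and only a specialized chunk falls back to trying a small merge.
static void toy_return_chunk(ToyIndex index) {
  if (index == ToySmall || index == ToySpecialized) {
    if (!toy_attempt_coalesce(ToyMedium)) {
      if (index == ToySpecialized) {
        (void) toy_attempt_coalesce(ToySmall);   // last resort; give up if this also fails
      }
    }
  }
}

int main() {
  toy_return_chunk(ToySpecialized);   // prints two attempts: medium, then small
  return 0;
}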
@@ -4332,7 +4962,7 @@ // The following test is placed here instead of a gtest / unittest file // because the ChunkManager class is only available in this file. void ChunkManager_test_list_index() { - ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk); + ChunkManager manager(true); // Test previous bug where a query for a humongous class metachunk, // incorrectly matched the non-class medium metachunk size. @@ -4553,8 +5183,8 @@ public: ChunkManagerReturnTestImpl() - : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment())) - , _cm(SpecializedChunk, SmallChunk, MediumChunk) + : _vsn(false, align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment())) + , _cm(false) , _chunks_in_chunkmanager(0) , _words_in_chunkmanager(0) { diff -r 589a6f1d86e9 -r 828b7c0af987 src/hotspot/share/memory/metaspace.hpp --- a/src/hotspot/share/memory/metaspace.hpp Sat Dec 09 07:50:50 2017 -0800 +++ b/src/hotspot/share/memory/metaspace.hpp Wed Dec 13 18:11:45 2017 +0100 @@ -175,6 +175,10 @@ assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype"); return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata(); } + // Convenience function + static ChunkManager* get_chunk_manager(bool is_class) { + return is_class ? chunk_manager_class() : chunk_manager_metadata(); + } static const MetaspaceTracer* tracer() { return _tracer; } static void freeze() { diff -r 589a6f1d86e9 -r 828b7c0af987 test/hotspot/gtest/memory/test_metachunk.cpp --- a/test/hotspot/gtest/memory/test_metachunk.cpp Sat Dec 09 07:50:50 2017 -0800 +++ b/test/hotspot/gtest/memory/test_metachunk.cpp Wed Dec 13 18:11:45 2017 +0100 @@ -41,11 +41,13 @@ }; TEST(Metachunk, basic) { - size_t size = 2 * 1024 * 1024; + const ChunkIndex chunk_type = MediumIndex; + const bool is_class = false; + const size_t size = get_size_for_nonhumongous_chunktype(chunk_type, is_class); void* memory = malloc(size); ASSERT_TRUE(NULL != memory) << "Failed to malloc 2MB"; - Metachunk* metachunk = ::new (memory) Metachunk(size / BytesPerWord, NULL); + Metachunk* metachunk = ::new (memory) Metachunk(chunk_type, is_class, chunk_origin_normal, size / BytesPerWord, NULL); EXPECT_EQ((MetaWord*) metachunk, metachunk->bottom()); EXPECT_EQ((uintptr_t*) metachunk + metachunk->size(), metachunk->end());
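For reference, a miniature model of the two-layer occupancy bitmap that backs the verification and coalescing decisions in this patch: layer 0 records where chunks start, layer 1 records which smallest-chunk-sized slots are in use. ToyOccupancyMap is a simplified illustration (no address-to-bit mapping, no 32/64-bit fast paths), not the OccupancyMap class added above:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Two bit layers, one bit per smallest-chunk-sized slot:
// layer 0 = "a chunk starts in this slot", layer 1 = "this slot is in use".
class ToyOccupancyMap {
  std::vector<std::uint8_t> _map[2];
public:
  explicit ToyOccupancyMap(std::size_t num_slots) {
    _map[0].assign((num_slots + 7) / 8, 0);
    _map[1].assign((num_slots + 7) / 8, 0);
  }
  void set_bit(unsigned pos, unsigned layer, bool v) {
    const std::uint8_t mask = std::uint8_t(1u << (pos % 8));
    if (v) {
      _map[layer][pos / 8] |= mask;
    } else {
      _map[layer][pos / 8] &= std::uint8_t(~mask);
    }
  }
  bool get_bit(unsigned pos, unsigned layer) const {
    return (_map[layer][pos / 8] >> (pos % 8)) & 1u;
  }
  // Region queries work slot by slot; the real map adds aligned 32/64-bit fast paths.
  bool any_bit_in_region(unsigned pos, unsigned len, unsigned layer) const {
    for (unsigned i = 0; i < len; i++) {
      if (get_bit(pos + i, layer)) {
        return true;
      }
    }
    return false;
  }
};

int main() {
  ToyOccupancyMap map(64);                                    // a node with 64 smallest-chunk slots
  map.set_bit(0, 0, true);                                    // a chunk starts at slot 0 ...
  for (unsigned i = 0; i < 4; i++) map.set_bit(i, 1, true);   // ... and its 4 slots are in use
  assert(map.any_bit_in_region(0, 4, 1));                     // live region: not coalescable
  for (unsigned i = 0; i < 4; i++) map.set_bit(i, 1, false);  // chunk is returned to the freelist
  assert(!map.any_bit_in_region(0, 4, 1));                    // now a merge over these slots is allowed
  return 0;
}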