/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaDebug.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceStatistics.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

// Given a requested allocation size, in words, returns the minimum size, in words,
// of an allocation from metaspace.
// A metaspace allocation must be large enough to hold a Metablock. This is because
// deallocated allocations are kept in the block freelist.
static size_t get_allocation_word_size(size_t requested_word_size) {

  size_t byte_size = requested_word_size * BytesPerWord;
  byte_size = MAX2(byte_size, sizeof(Metablock));
  byte_size = align_up(byte_size, Metachunk::allocation_alignment_bytes);

  const size_t word_size = byte_size / BytesPerWord;
  assert(word_size * BytesPerWord == byte_size, "Size problem");

  return word_size;
}
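
// Worked example for the rounding above (illustrative only; the concrete numbers
// assume a 64-bit VM where BytesPerWord == 8, sizeof(Metablock) == 16 and
// Metachunk::allocation_alignment_bytes == 8 - the real values depend on the
// platform and build):
//
//   get_allocation_word_size(1)
//     byte_size = 1 * 8              =  8
//     MAX2(8, sizeof(Metablock))     = 16
//     align_up(16, 8)                = 16
//     return 16 / 8                  =  2
//
// So even a one-word request is rounded up far enough that the allocation, once
// deallocated, can hold the Metablock header needed by the block freelist.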
// Given a requested word size, will allocate a chunk large enough to at least fit that
// size, but which may be larger according to the rules in the ChunkAllocSequence.
// Updates counters and adds the chunk to the head of the chunk list.
Metachunk* SpaceManager::allocate_chunk_to_fit(size_t requested_word_size) {

  guarantee(requested_word_size < chklvl::MAX_CHUNK_WORD_SIZE,
            "Requested size too large (" SIZE_FORMAT ").", requested_word_size);

  const chklvl_t min_level = chklvl::level_fitting_word_size(requested_word_size);
  chklvl_t max_level = _chunk_alloc_sequence->get_next_chunk_level(_num_chunks_total);
  if (max_level < min_level) {
    max_level = min_level;
  }

  Metachunk* c = _chunk_manager->get_chunk(min_level, max_level);
  assert(c != NULL, "Could not get a chunk");
  assert(c->level() >= min_level && c->level() <= max_level, "Sanity");

  _chunks.add(c);
  _current_chunk = c;

  return c;
}

void SpaceManager::create_block_freelist() {
  assert(_block_freelist == NULL, "Only call once");
  _block_freelist = new BlockFreelist();
}

SpaceManager::SpaceManager(ChunkManager* chunk_manager,
                           const ChunkAllocSequence* alloc_sequence,
                           Mutex* lock) :
  _lock(lock),
  _chunk_manager(chunk_manager),
  _chunk_alloc_sequence(alloc_sequence),
  _chunks(),
  _current_chunk(NULL),
  _block_freelist(NULL)
{
}

SpaceManager::~SpaceManager() {

  assert_lock_strong(lock());

  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  // Return all chunks to our chunk manager.
  // Note: this destroys the _chunks list.
  Metachunk* c = _chunks.first();
  Metachunk* c2 = NULL;
  while (c != NULL) {
    c2 = c->next();
    _chunk_manager->return_chunk(c);
    c = c2;
  }

#ifdef ASSERT
  EVERY_NTH(VerifyMetaspaceInterval)
    chunk_manager()->verify(true);
  END_EVERY_NTH
#endif

  delete _block_freelist;
}

// The current chunk is unable to service a request. The remainder of the chunk is
// chopped into blocks and fed into the _block_freelists, in the hope of later reuse.
void SpaceManager::retire_current_chunk() {

  Metachunk* c = _current_chunk;
  assert(c != NULL, "Sanity");
  assert(c->used_words() > 0, "Why do we retire an empty chunk?");

  size_t remaining_words = c->free_below_committed_words();
  if (remaining_words >= SmallBlocks::small_block_min_size()) {
    bool did_hit_limit = false;
    MetaWord* ptr = c->allocate(remaining_words, &did_hit_limit);
    assert(ptr != NULL && did_hit_limit == false, "Should have worked");
    deallocate(ptr, remaining_words);
  }
}
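
// Illustrative walk-through of the retire-and-replace path used by allocate()
// below (the numbers are made up for exposition): assume the current chunk has
// 100 committed words left and the next request needs 500 words. Step 2 of
// allocate() fails, retire_current_chunk() feeds the 100-word remainder into
// the block freelist via deallocate(), and step 3 fetches a fresh chunk of at
// least 500 words. The remainder is not lost: once the freelist grows beyond
// allocation_from_dictionary_limit, step 1 can hand it out again.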
// Allocate memory from Metaspace.
// 1) Attempt to allocate from the dictionary of deallocated blocks.
// 2) Failing that, attempt to allocate from the current chunk. If this
//    fails because the chunk needed to be committed and we hit a commit limit, return NULL.
// 3) Attempt to get a new chunk and allocate from that chunk. Again, we may hit a commit
//    limit, in which case we return NULL.
MetaWord* SpaceManager::allocate(size_t requested_word_size) {

  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);

  const size_t word_size = get_allocation_word_size(requested_word_size);

  MetaWord* p = NULL;
  bool did_hit_limit = false;

  // Allocate first chunk if needed.
  if (_current_chunk == NULL) {
    Metachunk* c = allocate_chunk_to_fit(word_size);
    assert(c != NULL && _chunks.size() == 1 && c == _current_chunk, "Should be");
  }

  // 1) Attempt to allocate from the dictionary of deallocated blocks.
  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size. Don't allocate
  // from the dictionary until it starts to get fat. Is this
  // a reasonable policy? Maybe a skinny dictionary is fast enough
  // for allocations. Do some profiling.
  if (_block_freelist != NULL &&
      _block_freelist->total_size() > constants::allocation_from_dictionary_limit) {
    p = _block_freelist->get_block(word_size);
    if (p != NULL) {
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
    }
  }

  // 2) Attempt to allocate from the current chunk.
  if (p == NULL && !did_hit_limit) {
    p = _current_chunk->allocate(word_size, &did_hit_limit);
  }

  // 3) Attempt to get a new chunk and allocate from that chunk.
  if (p == NULL && !did_hit_limit) {

    // The old chunk is too small to hold the requested size?
    assert(_current_chunk->free_words() < word_size, "Sanity");

    // Retire the old chunk. This will put all remainder space (committed
    // space only) into the block freelist.
    retire_current_chunk();
    assert(_current_chunk->free_below_committed_words() == 0, "Sanity");

    // Allocate a new chunk.
    Metachunk* c = allocate_chunk_to_fit(word_size);
    assert(c != NULL && _chunks.size() > 0 && c == _current_chunk, "Should be");

    p = _current_chunk->allocate(word_size, &did_hit_limit);
  }

  assert(p != NULL || did_hit_limit, "Sanity");

  return p;
}

// Update statistics. This walks all in-use chunks.
void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {

  for (const Metachunk* c = _chunks.first(); c != NULL; c = c->next()) {
    UsedChunksStatistics& ucs = out->chunk_stats[c->level()];
    ucs.cap += c->word_size();
    // Note: for free and waste, we only count what's committed.
    if (c == _current_chunk) {
      ucs.free += c->free_words();
    } else {
      ucs.waste += c->free_words();
    }
  }

  if (block_freelist() != NULL) {
    out->add_free_blocks_info(block_freelist()->num_blocks(),
                              block_freelist()->total_size());
  }
}

} // namespace metaspace