/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaDebug.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

// Given a requested allocation size, in words, returns the minimum size, in words, of an
// allocation from metaspace. A metaspace allocation must be large enough to hold a Metablock,
// because deallocated allocations are kept in the block freelist.
static size_t get_allocation_word_size(size_t requested_word_size) {

  size_t byte_size = requested_word_size * BytesPerWord;
  byte_size = MAX2(byte_size, sizeof(Metablock));
  byte_size = align_up(byte_size, Metachunk::allocation_alignment_bytes);

  const size_t word_size = byte_size / BytesPerWord;
  assert(word_size * BytesPerWord == byte_size, "Size problem");

  return word_size;
}

// Given a requested word size, allocates a chunk large enough to at least fit that
// size, but which may be larger according to the rules in the ChunkAllocSequence.
// Updates counters and adds the chunk to the head of the chunk list.
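// Note that the level suggested by the ChunkAllocSequence is only a preference: it is
// clamped against the minimum level needed to fit requested_word_size, so the returned
// chunk is always large enough for the request. On return, the new chunk has also been
// made the current chunk of this SpaceManager.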
Metachunk* SpaceManager::allocate_chunk_to_fit(size_t requested_word_size) {

  guarantee(requested_word_size < chklvl::MAX_CHUNK_WORD_SIZE,
            "Requested size too large (" SIZE_FORMAT ").", requested_word_size);

  const chklvl_t min_level = chklvl::level_fitting_word_size(requested_word_size);
  chklvl_t max_level = _chunk_alloc_sequence->get_next_chunk_level(_num_chunks_total);

  if (max_level < min_level) {
    max_level = min_level;
  }

  Metachunk* c = _chunk_manager->get_chunk(min_level, max_level);
  assert(c != NULL, "Could not get a chunk");
  assert(c->level() >= min_level && c->level() <= max_level, "Sanity");

  _num_chunks_by_level[c->level()] ++;
  _num_chunks_total ++;

  _chunks.add(c);
  _current_chunk = c;

  return c;

}

void SpaceManager::create_block_freelist() {
  assert(_block_freelist == NULL, "Only call once");
  _block_freelist = new BlockFreelist();
}

SpaceManager::SpaceManager(ChunkManager* chunk_manager, const ChunkAllocSequence* alloc_sequence, Mutex* lock)
  : _lock(lock),
    _chunk_manager(chunk_manager),
    _chunk_alloc_sequence(alloc_sequence),
    _chunks(),
    _current_chunk(NULL),
    _block_freelist(NULL),
    _num_chunks_by_level { 0 },
    _num_chunks_total(0)
{
}

SpaceManager::~SpaceManager() {

  assert_lock_strong(lock());

  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  // Return all chunks to our chunk manager.
  // Note: this destroys the _chunks list.
  Metachunk* c = _chunks.first();
  Metachunk* c2 = NULL;
  while (c) {
    c2 = c->next();
    _chunk_manager->return_chunk(c);
    c = c2;
  }

#ifdef ASSERT
  EVERY_NTH(VerifyMetaspaceInterval)
    chunk_manager()->verify(true);
  END_EVERY_NTH
#endif

  delete _block_freelist;

}

// The current chunk is unable to service a request. The remainder of the chunk is
// chopped into blocks and fed into the _block_freelist, in the hope of later reuse.
void SpaceManager::retire_current_chunk() {
  Metachunk* c = _current_chunk;
  assert(c != NULL, "Sanity");
  assert(c->used_words() > 0, "Why do we retire an empty chunk?");
  size_t remaining_words = c->free_below_committed_words();
  if (remaining_words >= SmallBlocks::small_block_min_size()) {
    bool did_hit_limit = false;
    MetaWord* ptr = c->allocate(remaining_words, &did_hit_limit);
    assert(ptr != NULL && did_hit_limit == false, "Should have worked");
    deallocate(ptr, remaining_words);
  }
}

// Allocate memory from Metaspace.
// 1) Attempt to allocate from the dictionary of deallocated blocks.
// 2) Failing that, attempt to allocate from the current chunk. If this
//    fails because the chunk needed to be committed and we hit a commit limit, return NULL.
// 3) Attempt to get a new chunk and allocate from that chunk. Again, we may hit a commit
//    limit, in which case we return NULL.
MetaWord* SpaceManager::allocate(size_t requested_word_size) {

  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);

  // Pad the requested size up to the minimum metaspace allocation size (already aligned).
  const size_t word_size = get_allocation_word_size(requested_word_size);

  MetaWord* p = NULL;

  bool did_hit_limit = false;

  // Allocate first chunk if needed.
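  // (A SpaceManager starts out without a current chunk - see the constructor - so the
  //  very first allocation has to get a chunk from the ChunkManager before any of the
  //  strategies below can be attempted.)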
  if (_current_chunk == NULL) {
    Metachunk* c = allocate_chunk_to_fit(word_size);
    assert(c != NULL && _chunks.size() == 1 && c == _current_chunk, "Should be");
  }

  // 1) Attempt to allocate from the dictionary of deallocated blocks.

  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size. Don't allocate
  // from the dictionary until it starts to get fat. Is this
  // a reasonable policy? Maybe a skinny dictionary is fast enough
  // for allocations. Do some profiling. JJJ
  if (_block_freelist != NULL && _block_freelist->total_size() > constants::allocation_from_dictionary_limit) {
    p = _block_freelist->get_block(word_size);
    if (p != NULL) {
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
    }
  }

  // 2) Attempt to allocate from the current chunk.
  if (p == NULL && !did_hit_limit) {
    p = _current_chunk->allocate(word_size, &did_hit_limit);
  }

  // 3) Attempt to get a new chunk and allocate from that chunk.
  if (p == NULL && !did_hit_limit) {

    // The old chunk must have been too small to hold the requested size.
    assert(_current_chunk->free_words() < word_size, "Sanity");

    // Retire the old chunk. This will put all remainder space (committed
    // space only) into the block freelist.
    retire_current_chunk();
    assert(_current_chunk->free_below_committed_words() == 0, "Sanity");

    // Allocate a new chunk.
    Metachunk* c = allocate_chunk_to_fit(word_size);
    assert(c != NULL && _chunks.size() > 0 && c == _current_chunk, "Should be");

    p = _current_chunk->allocate(word_size, &did_hit_limit);

  }

  // We either got a pointer, or we hit a commit limit.
  assert(p != NULL || did_hit_limit, "Sanity");

  return p;

}

} // namespace metaspace