rev 57625 : [mq]: metaspace-improvement
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaDebug.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceStatistics.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

// Given a requested net allocation word size, return the minimum
// word size this allocation would need (metaspace allocations must
// be large enough to hold a Metablock since deallocated allocations
// are kept in a block freelist).
static size_t get_allocation_word_size(size_t requested_word_size) {

  size_t byte_size = requested_word_size * BytesPerWord;
  byte_size = MAX2(byte_size, sizeof(Metablock));
  byte_size = align_up(byte_size, Metachunk::allocation_alignment_bytes);

  const size_t word_size = byte_size / BytesPerWord;

  assert(word_size * BytesPerWord == byte_size, "Size problem");

  return word_size;
}
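
// Example (editorial sketch only; the concrete numbers depend on the platform
// and on the Metablock layout): on a 64-bit VM with BytesPerWord == 8, a
// request for a single word is first raised to sizeof(Metablock), so that the
// space can later be threaded onto the block freelist if it is deallocated,
// then aligned up to Metachunk::allocation_alignment_bytes, and the resulting
// byte size is converted back into words.
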
// Given a requested word size, will allocate a chunk large enough to at least fit that
// size, but may be larger according to the rules in the ChunkAllocSequence.
// Updates counters and adds the chunk to the head of the chunk list.
Metachunk* SpaceManager::allocate_chunk_to_fit(size_t requested_word_size) {

  assert_lock_strong(lock());

  guarantee(requested_word_size < chklvl::MAX_CHUNK_WORD_SIZE,
            "Requested size too large (" SIZE_FORMAT ").", requested_word_size);

  const chklvl_t min_level = chklvl::level_fitting_word_size(requested_word_size);
  chklvl_t pref_level = _chunk_alloc_sequence->get_next_chunk_level(_chunks.size());

  if (pref_level > min_level) {
    pref_level = min_level;
  }

  Metachunk* c = _chunk_manager->get_chunk(min_level, pref_level);
  assert(c != NULL, "Could not get a chunk");
  assert(c->level() <= min_level && c->level() >= pref_level, "Sanity");

  _chunks.add(c);
  _current_chunk = c;

  return c;

}
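
// Note on the level arithmetic above (editorial clarification): in the chklvl
// scheme, numerically higher levels denote smaller chunks, so min_level is the
// level of the smallest chunk that still fits the request. If the allocation
// sequence suggests a chunk that would be too small (pref_level > min_level),
// pref_level is clamped to min_level, so the ChunkManager always hands back a
// chunk at least large enough for the request, as the assert on c->level()
// checks.
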
void SpaceManager::create_block_freelist() {
  assert(_block_freelist == NULL, "Only call once");
  _block_freelist = new BlockFreelist();
}

SpaceManager::SpaceManager(ChunkManager* chunk_manager, const ChunkAllocSequence* alloc_sequence, Mutex* lock)
  : _lock(lock),
    _chunk_manager(chunk_manager),
    _chunk_alloc_sequence(alloc_sequence),
    _chunks(),
    _current_chunk(NULL),
    _block_freelist(NULL)
{
}

SpaceManager::~SpaceManager() {

  MutexLocker fcl(lock(), Mutex::_no_safepoint_check_flag);

  // Return all chunks to our chunk manager.
  // Note: this destroys the _chunks list.
  Metachunk* c = _chunks.first();
  Metachunk* c2 = NULL;
  while (c) {
    c2 = c->next();
    _chunk_manager->return_chunk(c);
    c = c2;
  }

#ifdef ASSERT
  EVERY_NTH(VerifyMetaspaceInterval)
    chunk_manager()->verify(true);
  END_EVERY_NTH
#endif

  delete _block_freelist;

}

// The current chunk is unable to service a request. The remainder of the chunk is
// chopped into blocks and fed into the _block_freelist, in the hope of later reuse.
void SpaceManager::retire_current_chunk() {
  assert_lock_strong(lock());

  Metachunk* c = _current_chunk;
  assert(c != NULL, "Sanity");
  assert(c->used_words() > 0, "Why do we retire an empty chunk?");
  size_t remaining_words = c->free_below_committed_words();
  if (remaining_words >= SmallBlocks::small_block_min_size()) {
    bool did_hit_limit = false;
    MetaWord* ptr = c->allocate(remaining_words, &did_hit_limit);
    assert(ptr != NULL && did_hit_limit == false, "Should have worked");
    deallocate(ptr, remaining_words);
  }
}

// Allocate memory from Metaspace.
// 1) Attempt to allocate from the dictionary of deallocated blocks.
// 2) Failing that, attempt to allocate from the current chunk. If this
//    fails because the chunk needed to be committed and we hit a commit limit, return NULL.
// 3) Attempt to get a new chunk and allocate from that chunk. Again, we may hit a commit
//    limit, in which case we return NULL.
MetaWord* SpaceManager::allocate(size_t requested_word_size) {

  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);

  const size_t raw_word_size = get_allocation_word_size(requested_word_size);

  MetaWord* p = NULL;

  bool did_hit_limit = false;

  // Allocate first chunk if needed.
  if (_current_chunk == NULL) {
    Metachunk* c = allocate_chunk_to_fit(raw_word_size);
    assert(c != NULL && _chunks.size() == 1 && c == _current_chunk, "Should be");
  }

  // 1) Attempt to allocate from the dictionary of deallocated blocks.

  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size. Don't allocate
  // from the dictionary until it starts to get fat. Is this
  // a reasonable policy? Maybe a skinny dictionary is fast enough
  // for allocations. Do some profiling. JJJ
  if (_block_freelist != NULL && _block_freelist->total_size() > constants::allocation_from_dictionary_limit) {
    p = _block_freelist->get_block(raw_word_size);
    if (p != NULL) {
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
    }
  }

  // 2) Attempt to allocate from the current chunk.
  if (p == NULL && !did_hit_limit) {
    p = _current_chunk->allocate(raw_word_size, &did_hit_limit);
  }

  // 3) Attempt to get a new chunk and allocate from that chunk.
  if (p == NULL && !did_hit_limit) {

    // Old chunk is too small to hold requested size?
    assert(_current_chunk->free_words() < raw_word_size, "Sanity");

    // Retire the old chunk. This will put all remainder space (committed
    // space only) into the block freelist.
    retire_current_chunk();
    assert(_current_chunk->free_below_committed_words() == 0, "Sanity");

    // Allocate a new chunk.
    Metachunk* c = allocate_chunk_to_fit(raw_word_size);
    assert(c != NULL && _chunks.size() > 0 && c == _current_chunk, "Should be");

    p = _current_chunk->allocate(raw_word_size, &did_hit_limit);

  }

  assert(p != NULL || (p == NULL && did_hit_limit), "Sanity");

  return p;

}

// Prematurely returns a metaspace allocation to the _block_freelist because it is not
// needed anymore.
void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
  _block_freelist->return_block(p, word_size);
}

// Update statistics. This walks all in-use chunks.
void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {

  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);

  for (const Metachunk* c = _chunks.first(); c != NULL; c = c->next()) {
    UsedChunksStatistics& ucs = out->chunk_stats[c->level()];
    ucs.cap += c->word_size();
    // Note: for free and waste, we only count what's committed.
    if (c == _current_chunk) {
      ucs.free += c->free_words();
    } else {
      ucs.waste += c->free_words();
    }
  }

  if (block_freelist() != NULL) {
    out->add_free_blocks_info(block_freelist()->num_blocks(), block_freelist()->total_size());
  }

}

#ifdef ASSERT
void SpaceManager::verify(bool slow) const {

  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);

  assert(_chunk_alloc_sequence != NULL && _chunk_manager != NULL, "Sanity");

  const Metachunk* c = _chunks.first();
  while (c != NULL) {
    c->verify(slow);
    c = c->next();
  }

}
#endif // ASSERT


} // namespace metaspace
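
// Usage sketch (editorial illustration, not part of this file's code; the
// variable names are made up, and in this prototype a SpaceManager is
// typically owned by a higher-level object such as a ClassLoaderMetaspace):
//
//   SpaceManager sm(chunk_manager, alloc_sequence, lock);
//   MetaWord* p = sm.allocate(word_size);   // NULL means a commit limit was hit
//   if (p != NULL) {
//     // ... use the allocation; if it becomes unneeded before the whole
//     // SpaceManager dies, hand it back for possible reuse:
//     sm.deallocate(p, word_size);
//   }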