/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "logging/log.hpp"
#include "memory/metaspace/constants.hpp"
#include "memory/metaspace/chunkAllocSequence.hpp"
#include "memory/metaspace/chunkLevel.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceStatistics.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

// Return a single chunk to the freelist and adjust accounting. No merge is attempted.
void ChunkManager::return_chunk_simple(Metachunk* c) {
  DEBUG_ONLY(c->verify(false));
  const chklvl_t lvl = c->level();
  _chunks[lvl].add(c);
  _total_word_size.increment_by(c->word_size());
}

// Remove a single chunk from the freelist at the given level and adjust counters.
// Returns NULL if there is no chunk at this level.
Metachunk* ChunkManager::remove_first_chunk_at_level(chklvl_t l) {
  DEBUG_ONLY(chklvl::check_valid_level(l);)
  Metachunk* c = _chunks[l].remove_first();
  if (c != NULL) {
    _total_word_size.decrement_by(c->word_size());
  }
  return c;
}

// Creates a chunk manager with a given name (which is for debug purposes only)
// and an associated virtual space list from which new chunks are requested
// (see get_chunk()).
ChunkManager::ChunkManager(const char* name, VirtualSpaceList* space_list)
  : _vslist(space_list),
    _name(name),
    _chunks()
{
}

// Given a chunk we are about to hand out to the caller, make sure it is committed
// according to constants::committed_words_on_fresh_chunks.
bool ChunkManager::commit_chunk_before_handout(Metachunk* c) {
  const size_t must_be_committed = MIN2(c->word_size(), constants::committed_words_on_fresh_chunks);
  return c->ensure_committed(must_be_committed);
}

#ifdef ASSERT
// Given a splinters array returned from a split operation, check that it meets expectations.
static void check_splinters_array(Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS], chklvl_t min, chklvl_t max) {
  // The array shall contain splinters in the range [min, max] and nothing outside. The chunk levels for
  // the chunks must match too.
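  // (Note: a numerically higher chunk level denotes a smaller chunk; ROOT_CHUNK_LEVEL is the
  //  level of the largest chunks, and splitting a chunk increases its level. Hence min <= max
  //  holds for the range checked here.)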
  for (chklvl_t l = chklvl::ROOT_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
    if (l >= min && l <= max) {
      assert(splinters[l] != NULL, "Missing splinters");
      assert(splinters[l]->level() == l, "Unexpected level");
      splinters[l]->verify(false);
    } else {
      assert(splinters[l] == NULL, "Unexpected splinters");
    }
  }
}
#endif

// Given a chunk which must be outside of a freelist and must be free, split it to
// meet a target level and return it. Splinters are added to the freelist.
Metachunk* ChunkManager::split_chunk_and_add_splinters(Metachunk* c, chklvl_t target_level) {

  assert(c->is_free() && c->level() < target_level, "Invalid chunk for splitting");
  DEBUG_ONLY(chklvl::check_valid_level(target_level);)

  const chklvl_t orig_level = c->level();
  Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS] = { 0 };
  c = c->vsnode()->split(target_level, c, splinters);

  // Splitting should never fail.
  assert(c != NULL, "Split failed");
  assert(c->level() == target_level, "Sanity");
  DEBUG_ONLY(c->verify(false));
  DEBUG_ONLY(check_splinters_array(splinters, orig_level + 1, target_level);)

  // Return splinters to freelist.
  for (chklvl_t l = orig_level + 1; l <= target_level; l ++) {
    return_chunk_simple(splinters[l]);
  }

  return c;
}

// Get a chunk and be smart about it.
// - 1) Attempt to find a free chunk of exactly the preferred level (pref_level).
// - 2) Failing that, attempt to find a smaller free chunk, down to and including the minimal level (min_level).
// - 3) Failing that, attempt to find a free chunk of larger size and split it.
// - 4) Failing that, attempt to allocate a new chunk from the connected virtual space.
// - Failing that, give up and return NULL.
// Note: this is not guaranteed to return a *committed* chunk. The chunk manager will
// attempt to commit the returned chunk according to constants::committed_words_on_fresh_chunks,
// but this may fail if we hit a commit limit. In that case, a partly uncommitted chunk
// will be returned, and the commit is attempted again when we allocate from the chunk's
// uncommitted area. See also Metachunk::allocate.
Metachunk* ChunkManager::get_chunk(chklvl_t min_level, chklvl_t pref_level) {

  assert_lock_strong(MetaspaceExpand_lock);
  DEBUG_ONLY(chklvl::check_valid_level(min_level);)
  DEBUG_ONLY(chklvl::check_valid_level(pref_level);)

  Metachunk* c = NULL;

  // 1) Attempt to find a free chunk of exactly the preferred level (pref_level).
  c = remove_first_chunk_at_level(pref_level);

  // 2) Failing that, attempt to find a smaller free chunk, down to and including the minimal level (min_level).
  if (c == NULL) {
    for (chklvl_t lvl = pref_level + 1; lvl <= min_level; lvl ++) {
      c = remove_first_chunk_at_level(lvl);
      if (c != NULL) {
        break;
      }
    }
  }

  // 3) Failing that, attempt to find a free chunk of larger size and split it.
  if (c == NULL) {
    for (chklvl_t lvl = pref_level - 1; lvl >= chklvl::ROOT_CHUNK_LEVEL; lvl --) {
      c = remove_first_chunk_at_level(lvl);
      if (c != NULL) {
        // Split chunk; add splinters to freelist.
        c = split_chunk_and_add_splinters(c, pref_level);
        break;
      }
    }
  }

  // 4) Failing that, attempt to allocate a new chunk from the connected virtual space.
  if (c == NULL) {

    c = _vslist->allocate_root_chunk();

    // This should always work. Note that getting the root chunk may not mean we committed memory.
    assert(c != NULL, "Unexpected");

    // Split this root chunk to the desired chunk size.
    c = split_chunk_and_add_splinters(c, pref_level);

  }

  // At this point we should always have a chunk. If we hit a commit limit in the
  // meantime, the chunk may still be uncommitted, but the chunk itself should exist now.
  assert(c != NULL, "Unexpected");

  // Before returning the chunk, attempt to commit it according to the handout rules.
  // If that fails, we ignore the error and return the uncommitted chunk.
  if (commit_chunk_before_handout(c) == false) {
    log_info(gc, metaspace)("Failed to commit chunk prior to handout.");
  }

  DEBUG_ONLY(verify(false);)

  return c;

} // ChunkManager::get_chunk

// Return a single chunk to the ChunkManager and adjust accounting. May merge the
// chunk with neighboring chunks.
// This happens after a class loader has been unloaded and releases its metaspace chunks.
// !! Note: this may invalidate the chunk. Do not access the chunk after
//    this function returns !!
void ChunkManager::return_chunk(Metachunk* c) {

  assert_lock_strong(MetaspaceExpand_lock);
  DEBUG_ONLY(c->verify(false);)

  const chklvl_t orig_lvl = c->level();

  int num_merged[chklvl::NUM_CHUNK_LEVELS] = { 0 };
  Metachunk* c2 = c->vsnode()->merge(c, num_merged);

  if (c2 != NULL) {
    DEBUG_ONLY(c2->verify(false));

    // We did merge chunks and now have a bigger chunk.
    assert(c2->level() < orig_lvl, "Sanity");

    // Adjust counters - the merged-in chunks have been removed from the free lists, but the counters
    // in this chunk manager must be adjusted too.
    size_t size_chunks_removed = 0;
    for (chklvl_t l = chklvl::ROOT_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
      if (num_merged[l] > 0) {
        // Since we have a binary tree, we should see exactly one merge per level.
        assert(num_merged[l] == 1, "sanity");
        _chunks[l].dec_counter_by(1);
        size_chunks_removed += chklvl::word_size_for_level(l);
      }
    }
    _total_word_size.decrement_by(size_chunks_removed);

    c = c2;
  }

  return_chunk_simple(c);

}

ChunkManager* ChunkManager::_chunkmanager_class = NULL;
ChunkManager* ChunkManager::_chunkmanager_nonclass = NULL;

void ChunkManager::set_chunkmanager_class(ChunkManager* cm) {
  assert(_chunkmanager_class == NULL, "Sanity");
  _chunkmanager_class = cm;
}

void ChunkManager::set_chunkmanager_nonclass(ChunkManager* cm) {
  assert(_chunkmanager_nonclass == NULL, "Sanity");
  _chunkmanager_nonclass = cm;
}

// Update statistics.
void ChunkManager::add_to_statistics(ChunkManagerStatistics* out) const {

  for (chklvl_t l = chklvl::ROOT_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
    out->num_chunks[l] += _chunks[l].size();
  }

}

} // namespace metaspace