1 /* 2 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */


#include "precompiled.hpp"

#include "logging/log.hpp"

#include "memory/metaspace/chunkLevel.hpp"
#include "memory/metaspace/chunkTree.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/constants.hpp"
#include "memory/metaspace/counter.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"

#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"

#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

namespace metaspace {

#ifdef ASSERT
// Debug-only helper: asserts that x - a pointer or a size, depending on the
// call site - is aligned to the metaspace commit granule size.
// Note: constants::commit_granule_bytes is a byte quantity; call sites pass
// both addresses and word-sized values - presumably the granule constant is
// chosen such that both alignments hold (TODO confirm against constants.hpp).
template <class T>
void check_is_aligned_to_commit_granule(T x) {
  assert(is_aligned(x, constants::commit_granule_bytes), "Unaligned pointer");
}
#endif

// Given an address range, ensure it is committed.
//
// The range has to be aligned to granule size.
//
// Function will:
// - check how many granules in that region are uncommitted; If all are committed, it
//  returns true immediately.
// - check if committing those uncommitted granules would bring us over the commit limit
//  (GC threshold, MaxMetaspaceSize). If true, it returns false.
// - commit the memory.
// - mark the range as committed in the commit mask
//
// Returns true if success, false if it did hit a commit limit.
70 bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) { 71 72 DEBUG_ONLY(check_is_aligned_to_commit_granule(p);) 73 DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);) 74 assert_lock_strong(MetaspaceExpand_lock); 75 76 // First calculate how large the committed regions in this range are 77 const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size); 78 DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);) 79 80 // By how much words we would increase commit charge 81 // were we to commit the given address range completely. 82 const size_t commit_increase_words = word_size - committed_words_in_range; 83 84 if (commit_increase_words == 0) { 85 return true; // Already fully committed, nothing to do. 86 } 87 88 // Before committing any more memory, check limits. 89 if (_commit_limiter->possible_expansion_words() < commit_increase_words) { 90 return false; 91 } 92 93 // Commit... 94 if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) { 95 vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace."); 96 } 97 98 log_debug(gc, metaspace)("Increased metaspace by " SIZE_FORMAT " bytes.", 99 commit_increase_words * BytesPerWord); 100 101 // ... tell commit limiter... 102 _commit_limiter->increase_committed(commit_increase_words); 103 104 // ... update counters in containing vslist ... 105 _total_committed_words_counter->increment_by(commit_increase_words); 106 107 // ... and update the commit mask. 108 _commit_mask.mark_range_as_committed(p, word_size); 109 110 #ifdef ASSERT 111 // The commit boundary maintained in the CommitLimiter should be equal the sum of committed words 112 // in both class and non-class vslist (outside gtests). 
113 if (_commit_limiter == CommitLimiter::globalLimiter()) { 114 assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch"); 115 } 116 #endif 117 118 return true; 119 120 } 121 122 // Given an address range, ensure it is committed. 123 // 124 // The range does not have to be aligned to granule size. However, the function will always commit 125 // whole granules. 126 // 127 // Function will: 128 // - check how many granules in that region are uncommitted; If all are committed, it 129 // returns true immediately. 130 // - check if committing those uncommitted granules would bring us over the commit limit 131 // (GC threshold, MaxMetaspaceSize). If true, it returns false. 132 // - commit the memory. 133 // - mark the range as committed in the commit mask 134 // 135 // Returns true if success, false if it did hit a commit limit. 136 bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) { 137 138 assert_lock_strong(MetaspaceExpand_lock); 139 assert(p != NULL && word_size > 0, "Sanity"); 140 141 MetaWord* p_start = align_down(p, constants::commit_granule_bytes); 142 MetaWord* p_end = align_up(p + word_size, constants::commit_granule_bytes); 143 144 // Todo: simple for now. 
Make it more intelligent late 145 return commit_range(p_start, p_end - p_start); 146 147 } 148 149 // Given an address range (which has to be aligned to commit granule size): 150 // - uncommit it 151 // - mark it as uncommitted in the commit mask 152 bool VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) { 153 154 DEBUG_ONLY(check_is_aligned_to_commit_granule(p);) 155 DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);) 156 assert_lock_strong(MetaspaceExpand_lock); 157 158 // First calculate how large the committed regions in this range are 159 const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size); 160 DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);) 161 162 if (committed_words_in_range == 0) { 163 return true; // Already fully uncommitted, nothing to do. 164 } 165 166 // Uncommit... 167 if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) { 168 // Note: this can actually happen, since uncommit may increase the number of mappings. 169 fatal("Failed to uncommit metaspace."); 170 } 171 172 log_debug(gc, metaspace)("Decreased metaspace by " SIZE_FORMAT " bytes.", 173 committed_words_in_range * BytesPerWord); 174 175 // ... tell commit limiter... 176 _commit_limiter->decrease_committed(committed_words_in_range); 177 178 // ... and global counters... 179 _total_committed_words_counter->decrement_by(committed_words_in_range); 180 181 // ... and update the commit mask. 182 _commit_mask.mark_range_as_uncommitted(p, word_size); 183 184 #ifdef ASSERT 185 // The commit boundary maintained in the CommitLimiter should be equal the sum of committed words 186 // in both class and non-class vslist (outside gtests). 
187 if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario 188 assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch"); 189 } 190 #endif 191 192 return true; 193 194 } 195 196 //// creation, destruction //// 197 198 VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs, 199 CommitLimiter* limiter, 200 SizeCounter* reserve_counter, 201 SizeCounter* commit_counter) 202 : _next(NULL), 203 _base((MetaWord*)rs.base()), 204 _word_size(rs.size() / BytesPerWord), 205 _used_words(0), 206 _commit_mask((MetaWord*)rs.base(), rs.size() / BytesPerWord), 207 _chunk_tree_array((MetaWord*)rs.base(), rs.size() / BytesPerWord), 208 _commit_limiter(limiter), 209 _total_reserved_words_counter(reserve_counter), 210 _total_committed_words_counter(commit_counter) 211 { 212 // Update reserved counter in vslist 213 _total_reserved_words_counter->increment_by(_word_size); 214 } 215 216 // Create a node of a given size 217 VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size, 218 CommitLimiter* limiter, 219 SizeCounter* reserve_counter, 220 SizeCounter* commit_counter) 221 { 222 223 DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);) 224 225 ReservedSpace rs(word_size * BytesPerWord, 226 constants::commit_granule_bytes, 227 false, // TODO deal with large pages 228 false); 229 230 if (!rs.is_reserved()) { 231 vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace"); 232 } 233 234 reserve_counter->increment_by(word_size * BytesPerWord); 235 236 return create_node(rs, limiter, reserve_counter, commit_counter); 237 238 } 239 240 // Create a node over an existing space 241 VirtualSpaceNode* VirtualSpaceNode::create_node(ReservedSpace rs, 242 CommitLimiter* limiter, 243 SizeCounter* reserve_counter, 244 SizeCounter* commit_counter) 245 { 246 reserve_counter->increment_by(rs.size() * BytesPerWord); 247 return new VirtualSpaceNode(rs, limiter, 
reserve_counter, commit_counter); 248 } 249 250 VirtualSpaceNode::~VirtualSpaceNode() { 251 _rs.release(); 252 253 254 // Update counters in vslist 255 _total_committed_words_counter->decrement_by(committed_words()); 256 _total_reserved_words_counter->decrement_by(_word_size); 257 258 } 259 260 261 262 //// Chunk allocation, splitting, merging ///// 263 264 // Allocate a root chunk from this node. Will fail and return NULL 265 // if the node is full. 266 // Note: this just returns a chunk whose memory is reserved; no memory is committed yet. 267 // Hence, before using this chunk, it must be committed. 268 // Also, no limits are checked, since no committing takes place. 269 Metachunk* VirtualSpaceNode::allocate_root_chunk() { 270 271 assert_lock_strong(MetaspaceExpand_lock); 272 273 assert_is_aligned(free_words(), chklvl::MAX_CHUNK_WORD_SIZE); 274 275 if (free_words() >= chklvl::MAX_CHUNK_WORD_SIZE) { 276 277 MetaWord* loc = _base + _used_words; 278 _used_words += chklvl::MAX_CHUNK_WORD_SIZE; 279 280 // Create a new chunk tree for that new root node. 281 ChunkTree* tree = _chunk_tree_array.get_tree_by_address(loc); 282 283 // Create a root chunk header and initialize it; 284 Metachunk* c = tree->alloc_root_chunk_header(); 285 286 // Wire it to the memory. 287 c->set_base(loc); 288 289 DEBUG_ONLY(c->verify(true);) 290 return c; 291 292 } 293 294 return NULL; // Node is full. 
295 296 } 297 298 Metachunk* VirtualSpaceNode::split(chklvl_t target_level, Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]) { 299 300 assert_lock_strong(MetaspaceExpand_lock); 301 302 // Get the tree associated with this chunk and let it handle the splitting 303 ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base()); 304 return tree->split(target_level, c, splinters); 305 306 } 307 308 Metachunk* VirtualSpaceNode::merge(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]) { 309 310 assert_lock_strong(MetaspaceExpand_lock); 311 312 // Get the tree associated with this chunk and let it handle the merging 313 ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base()); 314 return tree->merge(c, num_merged); 315 316 } 317 318 void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const { 319 320 st->print("node @" PTR_FORMAT ": ", p2i(this)); 321 st->print("reserved="); 322 print_scaled_words(st, word_size(), scale); 323 st->print(", committed="); 324 print_scaled_words_and_percentage(st, committed_words(), word_size(), scale); 325 st->print(", used="); 326 print_scaled_words_and_percentage(st, used_words(), word_size(), scale); 327 328 st->cr(); 329 330 st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")", 331 p2i(base()), p2i(base() + used_words()), p2i(base() + word_size())); 332 333 st->print("Tree/Chunk footprint: "); 334 print_scaled_words(st, _chunk_tree_array.memory_footprint_words(), scale); 335 336 st->cr(); 337 338 } 339 340 341 #ifdef ASSERT 342 // Verify counters and basic structure. Slow mode: verify all chunks in depth 343 void VirtualSpaceNode::verify(bool slow) const { 344 345 assert_lock_strong(MetaspaceExpand_lock); 346 347 assert(base() != NULL, "Invalid base"); 348 assert_is_aligned(base(), chklvl::MAX_CHUNK_BYTE_SIZE); 349 assert(used_words() < word_size(), "Sanity"); 350 351 // Since we only ever hand out root chunks from a vsnode, top should always be aligned 352 // to root chunk size. 
353 assert_is_aligned(used_words(), chklvl::MAX_CHUNK_WORD_SIZE); 354 355 _commit_mask.verify(slow); 356 _chunk_tree_array.verify(slow); 357 358 } 359 360 // Returns sum of committed space, in words. 361 size_t VirtualSpaceNode::committed_words() const { 362 return _commit_mask.get_committed_size(); 363 } 364 #endif 365 366 367 } // namespace metaspace 368