1 /*
   2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "logging/log.hpp"
  29 
  30 #include "memory/metaspace/chunkLevel.hpp"
  31 #include "memory/metaspace/chunkTree.hpp"
  32 #include "memory/metaspace/commitLimiter.hpp"
  33 #include "memory/metaspace/constants.hpp"
  34 #include "memory/metaspace/counter.hpp"
  35 #include "memory/metaspace/metachunk.hpp"
  36 #include "memory/metaspace/metaspaceCommon.hpp"
  37 #include "memory/metaspace/runningCounters.hpp"
  38 #include "memory/metaspace/virtualSpaceNode.hpp"
  39 
  40 #include "runtime/mutexLocker.hpp"
  41 #include "runtime/os.hpp"
  42 
  43 #include "utilities/align.hpp"
  44 #include "utilities/debug.hpp"
  45 #include "utilities/globalDefinitions.hpp"
  46 
  47 namespace metaspace {
  48 
  49 #ifdef ASSERT
  50 template <class T>
  51 void check_is_aligned_to_commit_granule(T x) {
  52   assert(is_aligned(x, constants::commit_granule_bytes), "Unaligned pointer");
  53 }
  54 #endif
  55 
// Given an address range, ensure it is committed.
//
// The range has to be aligned to granule size.
//
// Function will:
// - check how many granules in that region are uncommitted; If all are committed, it
//    returns true immediately.
// - check if committing those uncommitted granules would bring us over the commit limit
//    (GC threshold, MaxMetaspaceSize). If true, it returns false.
// - commit the memory.
// - mark the range as committed in the commit mask
//
// Expects the caller to hold the MetaspaceExpand_lock.
//
// Returns true if success, false if it did hit a commit limit.
bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {

  DEBUG_ONLY(check_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(MetaspaceExpand_lock);

  // First calculate how large the committed regions in this range are
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);)

  // By how much words we would increase commit charge
  //  were we to commit the given address range completely.
  const size_t commit_increase_words = word_size - committed_words_in_range;

  if (commit_increase_words == 0) {
    return true; // Already fully committed, nothing to do.
  }

  // Before committing any more memory, check limits.
  // Only the net increase counts against the limit; already-committed granules
  // in the range have been charged before.
  if (_commit_limiter->possible_expansion_words() < commit_increase_words) {
    return false;
  }

  // Commit...
  // NOTE(review): the whole range is passed to os::commit_memory, including
  // granules which are already committed - presumably re-committing committed
  // memory is harmless on all supported platforms; confirm.
  if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace.");
  }

  log_debug(gc, metaspace)("Increased metaspace by " SIZE_FORMAT " bytes.",
                           commit_increase_words * BytesPerWord);

  // ... tell commit limiter...
  _commit_limiter->increase_committed(commit_increase_words);

  // ... update counters in containing vslist ...
  _total_committed_words_counter->increment_by(commit_increase_words);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_committed(p, word_size);

#ifdef ASSERT
  // The commit boundary maintained in the CommitLimiter should be equal the sum of committed words
  // in both class and non-class vslist (outside gtests).
  if (_commit_limiter == CommitLimiter::globalLimiter()) {
    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
  }
#endif

  return true;

}
 120 
 121 // Given an address range, ensure it is committed.
 122 //
 123 // The range does not have to be aligned to granule size. However, the function will always commit
 124 // whole granules.
 125 //
 126 // Function will:
 127 // - check how many granules in that region are uncommitted; If all are committed, it
 128 //    returns true immediately.
 129 // - check if committing those uncommitted granules would bring us over the commit limit
 130 //    (GC threshold, MaxMetaspaceSize). If true, it returns false.
 131 // - commit the memory.
 132 // - mark the range as committed in the commit mask
 133 //
 134 // Returns true if success, false if it did hit a commit limit.
 135 bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) {
 136 
 137   assert_lock_strong(MetaspaceExpand_lock);
 138   assert(p != NULL && word_size > 0, "Sanity");
 139 
 140   MetaWord* p_start = align_down(p, constants::commit_granule_bytes);
 141   MetaWord* p_end = align_up(p + word_size, constants::commit_granule_bytes);
 142 
 143   // Todo: simple for now. Make it more intelligent late
 144   return commit_range(p_start, p_end - p_start);
 145 
 146 }
 147 
// Given an address range (which has to be aligned to commit granule size):
//  - uncommit it
//  - mark it as uncommitted in the commit mask
//
// Expects the caller to hold the MetaspaceExpand_lock.
// Returns true (a failure to uncommit is fatal, see below).
bool VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {

  DEBUG_ONLY(check_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(MetaspaceExpand_lock);

  // First calculate how large the committed regions in this range are
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);)

  if (committed_words_in_range == 0) {
    return true; // Already fully uncommitted, nothing to do.
  }

  // Uncommit...
  // NOTE(review): the whole range is passed to os::uncommit_memory, even when
  // only part of it is committed - presumably uncommitting uncommitted memory
  // is harmless; only the actually-committed words are subtracted from the
  // counters below.
  if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
    // Note: this can actually happen, since uncommit may increase the number of mappings.
    fatal("Failed to uncommit metaspace.");
  }

  log_debug(gc, metaspace)("Decreased metaspace by " SIZE_FORMAT " bytes.",
                            committed_words_in_range * BytesPerWord);

  // ... tell commit limiter...
  _commit_limiter->decrease_committed(committed_words_in_range);

  // ... and global counters...
  _total_committed_words_counter->decrement_by(committed_words_in_range);

   // ... and update the commit mask.
  _commit_mask.mark_range_as_uncommitted(p, word_size);

#ifdef ASSERT
  // The commit boundary maintained in the CommitLimiter should be equal the sum of committed words
  // in both class and non-class vslist (outside gtests).
  if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario
    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
  }
#endif

  return true;

}
 194 
 195 //// creation, destruction ////
 196 
 197 VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs,
 198                                    CommitLimiter* limiter,
 199                                    SizeCounter* reserve_counter,
 200                                    SizeCounter* commit_counter)
 201   : _next(NULL),
 202     _base(rs.base()),
 203     _word_size(rs.size() / BytesPerWord),
 204     _used_words(0),
 205     _commit_mask(rs.base(), rs.size() / BytesPerWord),
 206     _chunk_tree_array(rs.base(), rs.size() / BytesPerWord),
 207     _commit_limiter(limiter),
 208     _total_reserved_words_counter(reserve_counter),
 209     _total_committed_words_counter(commit_counter)
 210 {
 211   // Update reserved counter in vslist
 212   _total_reserved_words_counter->increment_by(_word_size);
 213 }
 214 
 215 // Create a node of a given size
 216 VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
 217                                                 CommitLimiter* limiter,
 218                                                 SizeCounter* reserve_counter,
 219                                                 SizeCounter* commit_counter)
 220 {
 221 
 222   DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);)
 223 
 224   ReservedSpace rs(word_size * BytesPerWord,
 225                    constants::commit_granule_bytes,
 226                    false, // TODO deal with large pages
 227                    false);
 228 
 229   if (!rs.is_reserved()) {
 230     vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
 231   }
 232 
 233   reserve_counter->increment_by(word_size * BytesPerWord);
 234 
 235   return create_node(rs, limiter, reserve_counter, commit_counter);
 236 
 237 }
 238 
 239 // Create a node over an existing space
 240 VirtualSpaceNode* VirtualSpaceNode::create_node(ReservedSpace rs,
 241                                                 CommitLimiter* limiter,
 242                                                 SizeCounter* reserve_counter,
 243                                                 SizeCounter* commit_counter)
 244 {
 245   reserve_counter->increment_by(rs.size() * BytesPerWord);
 246   return new VirtualSpaceNode(rs, limiter, reserve_counter, commit_counter);
 247 }
 248 
// Release the underlying reservation and roll back this node's contribution
// to the global reserved/committed counters.
VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();


  // Update counters in vslist
  // Note: committed_words() reads the commit mask, which is a plain member
  // and stays valid after _rs.release().
  _total_committed_words_counter->decrement_by(committed_words());
  _total_reserved_words_counter->decrement_by(_word_size);

}
 258 
 259 
 260 
 261 //// Chunk allocation, splitting, merging /////
 262 
 263 // Allocate a root chunk from this node. Will fail and return NULL
 264 // if the node is full.
 265 // Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
 266 // Hence, before using this chunk, it must be committed.
 267 // Also, no limits are checked, since no committing takes place.
 268 Metachunk* VirtualSpaceNode::allocate_root_chunk() {
 269 
 270   assert_lock_strong(MetaspaceExpand_lock);
 271 
 272   assert_is_aligned(free_words, chklvl::MAX_CHUNK_WORD_SIZE);
 273 
 274   if (free_words() >= chklvl::MAX_CHUNK_WORD_SIZE) {
 275 
 276     MetaWord* loc = _base + _used_words;
 277     _used_words += chklvl::MAX_CHUNK_WORD_SIZE;
 278 
 279     // Create a new chunk tree for that new root node.
 280     ChunkTree* tree = _chunk_tree_array.get_tree_by_address(loc);
 281 
 282     // Create a root chunk header and initialize it;
 283     Metachunk* c = tree->alloc_root_chunk_header();
 284 
 285     // Wire it to the memory.
 286     c->set_base(loc);
 287 
 288     DEBUG_ONLY(c->verify(true);)
 289     return c;
 290 
 291   }
 292 
 293   return NULL; // Node is full.
 294 
 295 }
 296 
 297 Metachunk* VirtualSpaceNode::split(chklvl_t target_level, Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]) {
 298 
 299   assert_lock_strong(MetaspaceExpand_lock);
 300 
 301   // Get the tree associated with this chunk and let it handle the splitting
 302   ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base());
 303   return tree->split(target_level, c, splinters);
 304 
 305 }
 306 
 307 Metachunk* VirtualSpaceNode::merge(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]) {
 308 
 309   assert_lock_strong(MetaspaceExpand_lock);
 310 
 311   // Get the tree associated with this chunk and let it handle the merging
 312   ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base());
 313   return tree->merge(c, num_merged);
 314 
 315 }
 316 
 317 #ifdef ASSERT
 318 // Verify counters and basic structure. Slow mode: verify all chunks in depth
 319 void VirtualSpaceNode::verify(bool slow) const {
 320 
 321   assert_lock_strong(MetaspaceExpand_lock);
 322 
 323   assert(base() != NULL, "Invalid base");
 324   assert_is_aligned(base(), chklvl::MAX_CHUNK_BYTE_SIZE);
 325   assert(used_words() < word_size(), "Sanity");
 326 
 327   // Since we only ever hand out root chunks from a vsnode, top should always be aligned
 328   // to root chunk size.
 329   assert_is_aligned(used_words(), chklvl::MAX_CHUNK_WORD_SIZE);
 330 
 331   _commit_mask.verify(slow);
 332   _chunk_tree_array.verify(slow);
 333 
 334 }
 335 
 336 // Returns sum of committed space, in words.
 337 size_t VirtualSpaceNode::committed_words() const {
 338   return _commit_mask.get_committed_size();
 339 }
 340 #endif
 341 
 342 
 343 } // namespace metaspace
 344