rev 57380 : [mq]: metaspace-improvement

/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "logging/log.hpp"

#include "memory/metaspace/commitCounter.hpp"
#include "memory/metaspace/constants.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"

#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"

#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

#ifdef ASSERT
// Helper to assert that a pointer or a size is aligned to the commit
// granule, the smallest unit by which metaspace memory is committed
// and uncommitted.
template <class T>
void check_is_aligned_to_commit_granule(T x) {
  assert(is_aligned(x, constants::commit_granule_bytes), "Not aligned to commit granule size");
}
#endif
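
// (Illustration, not part of the patch; the granule value is hypothetical:
//  with a commit granule of 64 KB, committing a 1 MB range touches 16
//  granules. The commit mask tracks each granule separately, so only the
//  granules not yet marked as committed add to the global commit charge.)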

// Given an address range (which has to be aligned to commit granule size):
//  - commit it
//  - mark it as committed in the commit mask
// Returns false, without committing anything, if we would hit a global commit
//  limit (GC threshold, MaxMetaspaceSize); returns true on success.
// Aborts the VM if the underlying os commit fails.
bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {

  DEBUG_ONLY(check_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(MetaspaceExpand_lock);

  // First, calculate how much of this range is already committed.
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);)

  // By how many words the global commit charge would increase
  //  were we to commit the given address range completely.
  const size_t commit_increase_words = word_size - committed_words_in_range;

  if (commit_increase_words == 0) {
    return true; // Already fully committed, nothing to do.
  }

  // Before committing any more memory, check limits.
  if (CommitCounter::possible_expansion_words() < commit_increase_words) {
    return false;
  }

  // Commit...
  if (!os::commit_memory((char*)p, word_size * BytesPerWord, false)) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace.");
  }

  log_debug(gc, metaspace)("Increased metaspace by " SIZE_FORMAT " bytes.",
                           commit_increase_words * BytesPerWord);

  // ... increase global commit counter ...
  CommitCounter::increase_committed(commit_increase_words);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_committed(p, word_size);

  return true;

}
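
// Usage sketch (illustration only, not part of the patch): callers are
// expected to hold the expand lock and to pass granule-aligned ranges:
//
//   MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
//   if (!node->commit_range(p, word_size)) { // p, word_size granule-aligned
//     ... hit MaxMetaspaceSize or the GC threshold; caller may trigger a GC ...
//   }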

// Given an address range (which has to be aligned to commit granule size),
// ensure it is committed:
//  - commit it
//  - mark it as committed in the commit mask
// Returns false if we would hit a global commit limit (GC threshold,
//  MaxMetaspaceSize); returns true on success.
// Aborts the VM if the underlying os commit fails.
bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) {

  // Todo: simple for now. Make it more intelligent later.
  return commit_range(p, word_size);

}

// Given an address range (which has to be aligned to commit granule size):
//  - uncommit it
//  - mark it as uncommitted in the commit mask
bool VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {

  DEBUG_ONLY(check_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(MetaspaceExpand_lock);

  // First, calculate how much of this range is actually committed.
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);)

  if (committed_words_in_range == 0) {
    return true; // Already fully uncommitted, nothing to do.
  }

  // Uncommit...
  if (!os::uncommit_memory((char*)p, word_size * BytesPerWord)) {
    // Note: this can actually happen, since uncommitting part of a region may
    //  split one memory mapping into several, and the OS may limit the number
    //  of mappings a process can have.
    fatal("Failed to uncommit metaspace.");
  }

  log_debug(gc, metaspace)("Decreased metaspace by " SIZE_FORMAT " bytes.",
                           committed_words_in_range * BytesPerWord);

  // ... decrease global commit counter ...
  CommitCounter::decrease_committed(committed_words_in_range);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_uncommitted(p, word_size);

  return true;

}

//// creation ////

// Create a new empty node spanning the given reserved space.
VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs)
  : _next(NULL)
  , _rs(rs)
  , _base((MetaWord*)rs.base())
  , _word_size(rs.size() / BytesPerWord)
  , _used_words(0)
  , _commit_mask((MetaWord*)rs.base(), rs.size() / BytesPerWord)
  , _chunk_tree_array((MetaWord*)rs.base(), rs.size() / BytesPerWord)
{}

// Create a node of a given size.
VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size) {

  DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);)

  ReservedSpace rs(word_size * BytesPerWord,
                   constants::commit_granule_bytes, // alignment
                   false, // TODO deal with large pages
                   false);

  if (!rs.is_reserved()) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
  }

  return create_node(rs);

}

// Create a node over an existing space.
VirtualSpaceNode* VirtualSpaceNode::create_node(ReservedSpace rs) {
  return new VirtualSpaceNode(rs);
}
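
// Usage sketch (illustration only, not part of the patch; node_word_size is
// a hypothetical caller-chosen value): reserving a node and carving the first
// root chunk from it. The chunk memory is not committed yet at that point:
//
//   VirtualSpaceNode* node = VirtualSpaceNode::create_node(node_word_size);
//   Metachunk* root = node->allocate_root_chunk();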

//// Chunk allocation, splitting, merging ////

// Allocate a root chunk from this node. Will fail and return NULL
// if the node is full.
// Note that the chunk memory may or may not be committed.
Metachunk* VirtualSpaceNode::allocate_root_chunk() {

  const size_t remaining_words = _word_size - _used_words;

  if (remaining_words >= chklvl::MAX_CHUNK_WORD_SIZE) {

    MetaWord* loc = _base + _used_words;
    _used_words += chklvl::MAX_CHUNK_WORD_SIZE;

    // Get the chunk tree covering that new root chunk.
    ChunkTree* tree = _chunk_tree_array.get_tree_by_address(loc);

    // Create a root chunk and initialize it.
    Metachunk* c = tree->alloc_root();
    c->set_base(loc);

    DEBUG_ONLY(c->verify(true);)
    return c;

  }

  // There should be no "leftover" space since the mapping for a
  // VirtualSpaceNode should always be aligned to root chunk size.
  assert(remaining_words == 0, "Sanity");

  return NULL; // Node is full.

}
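
// Note (illustration only, not part of the patch): since root chunk memory
// may be uncommitted, a caller would typically commit the needed part before
// handing out memory from the chunk, e.g.:
//
//   Metachunk* c = node->allocate_root_chunk();
//   if (c != NULL) {
//     node->ensure_range_is_committed(c->base(), word_size); // granule-aligned
//   }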

// Given a chunk c, split it recursively until you get a chunk of the given target_level.
// Returns a pointer to the result chunk; returns the split-off chunks as a linked
// list in p_splinters.
// Returns NULL if the chunk cannot be split at least once.
Metachunk* VirtualSpaceNode::split(chklvl_t target_level, Metachunk* c, Metachunk** p_splinters) {

  // Get the tree associated with this chunk and let it handle the splitting.
  ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base());
  return tree->split(target_level, c, p_splinters);

}
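
// Usage sketch (illustration only, not part of the patch): the caller owns
// the splinter chunks and would typically add them to its freelists:
//
//   Metachunk* splinters = NULL;
//   Metachunk* c2 = node->split(target_level, c, &splinters);
//   while (splinters != NULL) {
//     ... unlink the first splinter and return it to the matching freelist ...
//   }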

// Given a chunk, attempt to merge it recursively with its neighboring chunks.
// If successful (merged at least once), returns the address of
// the merged chunk; NULL otherwise.
//
// !!! Please note that if this method returns a non-NULL value, the
// original chunk will be invalid and should not be accessed anymore! !!!
Metachunk* VirtualSpaceNode::merge(Metachunk* c) {

  // Get the tree associated with this chunk and let it handle the merging.
  ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base());
  return tree->merge(c);

}
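
// Usage sketch (illustration only, not part of the patch): since a successful
// merge invalidates the original chunk header, callers must replace their
// pointer right away:
//
//   Metachunk* merged = node->merge(c);
//   if (merged != NULL) {
//     c = merged; // the old chunk header must not be touched anymore
//   }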

#ifdef ASSERT
// Verify counters and basic structure. Slow mode: verify all chunks in depth.
void VirtualSpaceNode::verify(bool slow) {
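  // Not implemented in this revision. A minimal sketch of what belongs here,
  //  assuming only members visible in this file:
  //
  //   assert(_used_words <= _word_size, "Sanity");
  //   if (slow) {
  //     ... walk _chunk_tree_array and verify each chunk tree in depth ...
  //   }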
}
#endif

} // namespace metaspace