src/hotspot/share/memory/metaspace/virtualSpaceList.cpp
rev 57601 : [mq]: metaspace-improvement
(This listing shows the file before and after the metaspace-improvement change.)
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 
  26 #include "precompiled.hpp"
  27 #include "logging/log.hpp"
  28 #include "logging/logStream.hpp"
  29 #include "memory/metaspace.hpp"
  30 #include "memory/metaspace/chunkManager.hpp"
  31 #include "memory/metaspace/metachunk.hpp"
  32 #include "memory/metaspace/metaspaceCommon.hpp"

  33 #include "memory/metaspace/virtualSpaceList.hpp"
  34 #include "memory/metaspace/virtualSpaceNode.hpp"
  35 #include "runtime/orderAccess.hpp"
  36 #include "runtime/mutexLocker.hpp"
  37 #include "runtime/safepoint.hpp"
  38 
  39 namespace metaspace {
  40 

































  41 
  42 VirtualSpaceList::~VirtualSpaceList() {
  43   VirtualSpaceListIterator iter(virtual_space_list());
  44   while (iter.repeat()) {
  45     VirtualSpaceNode* vsl = iter.get_next();
  46     delete vsl;




  47   }
  48 }
  49 
  50 void VirtualSpaceList::inc_reserved_words(size_t v) {
  51   assert_lock_strong(MetaspaceExpand_lock);
  52   _reserved_words = _reserved_words + v;
  53 }
  54 void VirtualSpaceList::dec_reserved_words(size_t v) {
  55   assert_lock_strong(MetaspaceExpand_lock);
  56   _reserved_words = _reserved_words - v;
  57 }
  58 
// Debug check expanded after every committed-counter update: the total
// committed metaspace must never exceed MaxMetaspaceSize.
#define assert_committed_below_limit()                        \
  assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
          MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
  65 void VirtualSpaceList::inc_committed_words(size_t v) {
  66   assert_lock_strong(MetaspaceExpand_lock);
  67   _committed_words = _committed_words + v;
  68 
  69   assert_committed_below_limit();
  70 }
  71 void VirtualSpaceList::dec_committed_words(size_t v) {
  72   assert_lock_strong(MetaspaceExpand_lock);
  73   _committed_words = _committed_words - v;
  74 
  75   assert_committed_below_limit();
  76 }
  77 
  78 void VirtualSpaceList::inc_virtual_space_count() {
  79   assert_lock_strong(MetaspaceExpand_lock);
  80   _virtual_space_count++;
  81 }
  82 
  83 void VirtualSpaceList::dec_virtual_space_count() {
  84   assert_lock_strong(MetaspaceExpand_lock);
  85   _virtual_space_count--;
  86 }
  87 
// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
// Caller must hold the expand lock; the list is mutated in place.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert_lock_strong(MetaspaceExpand_lock);
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  int num_purged_nodes = 0;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify(false);)
    // Save the successor before we possibly delete vsl below.
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
                                         ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      // Return the node's chunks to the chunk manager, then subtract its
      // contribution from the list-wide statistics before deleting it.
      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      delete vsl;
      num_purged_nodes ++;
    } else {
      // Node survives; it becomes the predecessor for the next iteration.
      prev_vsl = vsl;
    }
  }

  // Verify list
#ifdef ASSERT
  if (num_purged_nodes > 0) {
    verify(false);
  }
#endif
}
 135 
 136 
 137 // This function looks at the mmap regions in the metaspace without locking.
 138 // The chunks are added with store ordering and not deleted except for at
 139 // unloading time during a safepoint.
 140 VirtualSpaceNode* VirtualSpaceList::find_enclosing_space(const void* ptr) {
 141   // List should be stable enough to use an iterator here because removing virtual
 142   // space nodes is only allowed at a safepoint.
 143   if (is_within_envelope((address)ptr)) {
 144     VirtualSpaceListIterator iter(virtual_space_list());
 145     while (iter.repeat()) {
 146       VirtualSpaceNode* vsn = iter.get_next();
 147       if (vsn->contains(ptr)) {
 148         return vsn;
 149       }
 150     }
 151   }
 152   return NULL;
 153 }
 154 
 155 void VirtualSpaceList::retire_current_virtual_space() {
 156   assert_lock_strong(MetaspaceExpand_lock);
 157 
 158   VirtualSpaceNode* vsn = current_virtual_space();
 159 
 160   ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
 161                                   Metaspace::chunk_manager_metadata();
 162 
 163   vsn->retire(cm);
 164 }
 165 
// Create an expandable, non-class list and eagerly create the first node
// with the given reserved word size. The envelope starts out empty
// (lo = max address, hi = NULL) and grows as nodes are added.
VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _is_class(false),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0),
                                   _envelope_lo((address)max_uintx),
                                   _envelope_hi(NULL) {
  // create_new_virtual_space() asserts the expand lock is held.
  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}
 178 
// Create the class-space list around a pre-reserved space. This list wraps
// the single node created here; see create_new_virtual_space(), which
// rejects further node creation for class lists.
VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _is_class(true),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0),
                                   _envelope_lo((address)max_uintx),
                                   _envelope_hi(NULL) {
  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
  bool succeeded = class_entry->initialize();
  // NOTE(review): on initialization failure the node is neither linked nor
  // deleted here - presumably leaked or handled by the caller; confirm.
  if (succeeded) {
    expand_envelope_to_include_node(class_entry);
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(class_entry);
  }
}
 198 
 199 size_t VirtualSpaceList::free_bytes() {
 200   return current_virtual_space()->free_words_in_vs() * BytesPerWord;
 201 }
 202 
 203 // Allocate another meta virtual space and add it to the list.
 204 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
 205   assert_lock_strong(MetaspaceExpand_lock);
 206 
 207   if (is_class()) {
 208     assert(false, "We currently don't support more than one VirtualSpace for"
 209                   " the compressed class space. The initialization of the"
 210                   " CCS uses another code path and should not hit this path.");
 211     return false;
 212   }
 213 
 214   if (vs_word_size == 0) {
 215     assert(false, "vs_word_size should always be at least _reserve_alignment large.");
 216     return false;





 217   }
 218 
 219   // Reserve the space
 220   size_t vs_byte_size = vs_word_size * BytesPerWord;
 221   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
 222 
 223   // Allocate the meta virtual space and initialize it.
 224   VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
 225   if (!new_entry->initialize()) {
 226     delete new_entry;
 227     return false;
 228   } else {
 229     assert(new_entry->reserved_words() == vs_word_size,
 230         "Reserved memory size differs from requested memory size");
 231     expand_envelope_to_include_node(new_entry);
 232     // ensure lock-free iteration sees fully initialized node
 233     OrderAccess::storestore();
 234     link_vs(new_entry);
 235     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
 236     return true;
 237   }
 238 
 239   DEBUG_ONLY(verify(false);)
 240 
 241 }
 242 
// Append new_entry to the list, make it the current node, and fold its
// reserved/committed sizes into the list-wide statistics.
// Caller is expected to hold the expand lock (the inc_* helpers assert it).
void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
      set_virtual_space_list(new_entry);
  } else {
    // The current node is the tail (each link_vs appends after it).
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();  // debug builds only
#endif
  // Trace-log the newly linked node if the log target is active.
  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(&ls);
  }
}
 264 
 265 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
 266                                       size_t min_words,
 267                                       size_t preferred_words) {
 268   size_t before = node->committed_words();
 269 
 270   bool result = node->expand_by(min_words, preferred_words);
 271 
 272   size_t after = node->committed_words();
 273 
 274   // after and before can be the same if the memory was pre-committed.
 275   assert(after >= before, "Inconsistency");
 276   inc_committed_words(after - before);
 277 
 278   return result;
 279 }
 280 
// Commit at least min_words (preferably preferred_words) of memory, first in
// the current node, then - if that fails - in a freshly created node.
// Expansion is subject to MetaspaceGC policy. Returns false if policy forbids
// the expansion or no memory could be committed.
bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  const char* const class_or_not = (is_class() ? "class" : "non-class");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
              class_or_not);
    return  false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
              class_or_not);
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
               class_or_not);
     return true;
  }
  log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
            class_or_not);
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it"
          "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}
 336 
 337 // Given a chunk, calculate the largest possible padding space which
 338 // could be required when allocating it.
 339 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
 340   const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
 341   if (chunk_type != HumongousIndex) {
 342     // Normal, non-humongous chunks are allocated at chunk size
 343     // boundaries, so the largest padding space required would be that
 344     // minus the smallest chunk size.
 345     const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 346     return chunk_word_size - smallest_chunk_size;
 347   } else {
 348     // Humongous chunks are allocated at smallest-chunksize
 349     // boundaries, so there is no padding required.
 350     return 0;
 351   }
 352 }
 353 
 354 
 355 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
 356 
 357   // Allocate a chunk out of the current virtual space.
 358   Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
 359 
 360   if (next != NULL) {
 361     return next;
 362   }
 363 
 364   // The expand amount is currently only determined by the requested sizes
 365   // and not how much committed memory is left in the current virtual space.
 366 
 367   // We must have enough space for the requested size and any
 368   // additional reqired padding chunks.
 369   const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
 370 
 371   size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
 372   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
 373   if (min_word_size >= preferred_word_size) {
 374     // Can happen when humongous chunks are allocated.
 375     preferred_word_size = min_word_size;
 376   }
 377 
 378   bool expanded = expand_by(min_word_size, preferred_word_size);
 379   if (expanded) {
 380     next = current_virtual_space()->get_chunk_vs(chunk_word_size);
 381     assert(next != NULL, "The allocation was expected to succeed after the expansion");
 382   }
 383 
 384    return next;
 385 }
 386 
 387 void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
 388   st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
 389       _virtual_space_count, p2i(_current_virtual_space));
 390   VirtualSpaceListIterator iter(virtual_space_list());
 391   while (iter.repeat()) {
 392     st->cr();
 393     VirtualSpaceNode* node = iter.get_next();
 394     node->print_on(st, scale);
 395   }
 396 }
 397 
 398 void VirtualSpaceList::print_map(outputStream* st) const {
 399   VirtualSpaceNode* list = virtual_space_list();
 400   VirtualSpaceListIterator iter(list);
 401   unsigned i = 0;
 402   while (iter.repeat()) {
 403     st->print_cr("Node %u:", i);
 404     VirtualSpaceNode* node = iter.get_next();
 405     node->print_map(st, this->is_class());
 406     i ++;
 407   }
 408 }
 409 
 410 // Given a node, expand range such that it includes the node.
 411 void VirtualSpaceList::expand_envelope_to_include_node(const VirtualSpaceNode* node) {
 412   _envelope_lo = MIN2(_envelope_lo, (address)node->low_boundary());
 413   _envelope_hi = MAX2(_envelope_hi, (address)node->high_boundary());
 414 }
 415 
 416 
#ifdef ASSERT
// Verify list invariants: every node lies within the envelope, and the
// aggregated node statistics match the list-wide counters.
// If slow is true, each node is verified in depth as well.
void VirtualSpaceList::verify(bool slow) {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  size_t reserved = 0;
  size_t committed = 0;
  size_t node_count = 0;
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    if (slow) {
      node->verify(true);
    }
    // Check that the node resides fully within our envelope.
    assert((address)node->low_boundary() >= _envelope_lo && (address)node->high_boundary() <= _envelope_hi,
           "Node " SIZE_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT ") outside envelope [" PTR_FORMAT ", " PTR_FORMAT ").",
           node_count, p2i(node->low_boundary()), p2i(node->high_boundary()), p2i(_envelope_lo), p2i(_envelope_hi));
    reserved += node->reserved_words();
    committed += node->committed_words();
    node_count ++;
  }
  assert(reserved == reserved_words() && committed == committed_words() && node_count == _virtual_space_count,
      "Mismatch: reserved real: " SIZE_FORMAT " expected: " SIZE_FORMAT
      ", committed real: " SIZE_FORMAT " expected: " SIZE_FORMAT
      ", node count real: " SIZE_FORMAT " expected: " SIZE_FORMAT ".",
      reserved, reserved_words(), committed, committed_words(),
      node_count, _virtual_space_count);
}
#endif // ASSERT
 445 
 446 } // namespace metaspace
 447 


   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 
  26 #include "precompiled.hpp"


  27 #include "memory/metaspace.hpp"
  28 #include "memory/metaspace/chunkManager.hpp"
  29 #include "memory/metaspace/counter.hpp"
  30 #include "memory/metaspace/commitLimiter.hpp"
  31 #include "memory/metaspace/counter.hpp"
  32 #include "memory/metaspace/virtualSpaceList.hpp"
  33 #include "memory/metaspace/virtualSpaceNode.hpp"

  34 #include "runtime/mutexLocker.hpp"
  35 
  36 
  37 namespace metaspace {
  38 
// Create a new, empty, expandable list.
// name:           label for this list (stored; verified non-NULL in verify()).
// commit_limiter: passed on to every node this list creates; not owned.
VirtualSpaceList::VirtualSpaceList(const char* name, CommitLimiter* commit_limiter)
  : _name(name),
    _first_node(NULL),
    _current_node(NULL),
    _can_expand(true),
    _commit_limiter(commit_limiter),
    _reserved_words_counter(),
    _committed_words_counter()
{
  // Create the first node right now. Nothing gets committed yet though.
  create_new_node();
}
  52 
// Create a new list. The list will contain one node only, which uses the given ReservedSpace.
// It will be not expandable beyond that first node.
// name:           label for this list (stored; verified non-NULL in verify()).
// rs:             pre-reserved space the single node will span.
// commit_limiter: passed on to the node; not owned.
VirtualSpaceList::VirtualSpaceList(const char* name, ReservedSpace rs, CommitLimiter* commit_limiter)
: _name(name),
  _first_node(NULL),
  _current_node(NULL),
  _can_expand(false),
  _commit_limiter(commit_limiter),
  _reserved_words_counter(),
  _committed_words_counter()
{
  // Create the first node spanning the existing ReservedSpace. This will be the only node created
  // for this list since we cannot expand.
  VirtualSpaceNode* vsn = VirtualSpaceNode::create_node(rs, _commit_limiter,
                                                        &_reserved_words_counter, &_committed_words_counter);
  assert(vsn != NULL, "node creation failed");
  _first_node = _current_node = vsn;
  _current_node->set_next(NULL);
}
  72 
  73 VirtualSpaceList::~VirtualSpaceList() {
  74   // Note: normally, there is no reason ever to delete a vslist since they are
  75   // global objects, but for gtests it makes sense to allow this.
  76   VirtualSpaceNode* vsn = _first_node;
  77   VirtualSpaceNode* vsn2 = vsn;
  78   while (vsn != NULL) {
  79     vsn2 = vsn->next();
  80     delete vsn;
  81     vsn = vsn2;
  82   }
  83 }
  84 
  85 // Create a new node and append it to the list. After
  86 // this function, _current_node shall point to a new empty node.
  87 // List must be expandable for this to work.
  88 void VirtualSpaceList::create_new_node() {
  89   assert(_can_expand, "List is not expandable");
  90   VirtualSpaceNode* vsn = VirtualSpaceNode::create_node(constants::virtual_space_node_default_size,
  91                                                         _commit_limiter,
  92                                                         &_reserved_words_counter, &_committed_words_counter);
  93   assert(vsn != NULL, "node creation failed");
  94   vsn->set_next(_first_node);
  95   _first_node = _current_node = vsn;
  96 }
  97 
  98 // Allocate a root chunk from this list.
  99 // Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
 100 // Hence, before using this chunk, it must be committed.
 101 // Also, no limits are checked, since no committing takes place.
 102 Metachunk*  VirtualSpaceList::allocate_root_chunk() {
 103 
 104   assert(_current_node != NULL, "Sanity");
 105 
 106   Metachunk* c = _current_node->allocate_root_chunk();
 107 
 108   if (c == NULL) {
 109 
 110     // The current node is fully used up.
 111 
 112     // Since all allocations from a VirtualSpaceNode happen in root-chunk-size units,
 113     // we should never have remaining space.
 114     assert(_current_node->free_words() == 0, "Sanity");


 115 
 116     if (_can_expand) {
 117       create_new_node();






































 118     } else {
 119       return NULL; // We cannot expand this list.

























 120     }
 121   }





 122 
 123   c = _current_node->allocate_root_chunk();
 124 
 125   assert(c != NULL, "This should have worked");

 126 
 127   return c;

 128 











 129 }
 130 
 131 // Print all nodes in this space list.
 132 void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
 133   const VirtualSpaceNode* vsn = _first_node;
 134   while (vsn != NULL) {
 135     vsn->print_on(st, scale);
 136     st->cr();
 137     vsn = vsn->next();










 138   }
 139 }
 140 
#ifdef ASSERT
// Verify list invariants: per-node verification plus a cross-check of the
// aggregated node sizes against the list-wide counters.
// If slow is true, the per-node verification is passed the slow flag too.
void VirtualSpaceList::verify(bool slow) const {
  assert(_current_node != NULL && _first_node != NULL && _name != NULL, "Sanity");

  size_t total_reserved_words = 0;
  size_t total_committed_words = 0;
  const VirtualSpaceNode* vsn = _first_node;
  while (vsn != NULL) {
    vsn->verify(slow);
    total_reserved_words += vsn->word_size();
    total_committed_words += vsn->committed_words();
    vsn = vsn->next();
  }

  // The counters must agree with the sums over all nodes.
  _reserved_words_counter.check(total_reserved_words);
  _committed_words_counter.check(total_committed_words);

}
#endif





















 160 
 161 // Returns true if this pointer is contained in one of our nodes.
 162 bool VirtualSpaceList::contains(const MetaWord* p) const {
 163   const VirtualSpaceNode* vsn = _first_node;
 164   while (vsn != NULL) {
 165     if (vsn->contains(p)) {



























 166       return true;
 167     }
 168     vsn = vsn->next();



















 169   }

 170   return false;
 171 }
 172 
// Global singletons for the class-space and non-class lists; each is set
// exactly once via set_vslist_class() / set_vslist_nonclass().
VirtualSpaceList* VirtualSpaceList::_vslist_class = NULL;
VirtualSpaceList* VirtualSpaceList::_vslist_nonclass = NULL;















 175 
// Install the global class-space list singleton; may only be called once.
void VirtualSpaceList::set_vslist_class(VirtualSpaceList* vsl) {
  assert(_vslist_class == NULL, "Sanity");
  _vslist_class = vsl;
}
 180 
// Install the global non-class list singleton; may only be called once.
void VirtualSpaceList::set_vslist_nonclass(VirtualSpaceList* vsl) {
  assert(_vslist_nonclass == NULL, "Sanity");
  _vslist_nonclass = vsl;
}

 185 
 186 } // namespace metaspace
 187 
< prev index next >