
src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp

rev 60538 : imported patch jep387-all.patch
   1 /*
   2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "logging/log.hpp"
  28 #include "logging/logStream.hpp"
  29 #include "memory/metaspace/metachunk.hpp"
  30 #include "memory/metaspace.hpp"
  31 #include "memory/metaspace/chunkManager.hpp"
  32 #include "memory/metaspace/metaDebug.hpp"
  33 #include "memory/metaspace/metaspaceCommon.hpp"
  34 #include "memory/metaspace/occupancyMap.hpp"
  35 #include "memory/metaspace/virtualSpaceNode.hpp"
  36 #include "memory/virtualspace.hpp"
  37 #include "runtime/atomic.hpp"
  38 #include "runtime/os.hpp"
  39 #include "services/memTracker.hpp"
  40 #include "utilities/copy.hpp"
  41 #include "utilities/debug.hpp"
  42 #include "utilities/globalDefinitions.hpp"
  43 
  44 namespace metaspace {
  45 
  46 // Decide if large pages should be committed when the memory is reserved.
  47 static bool should_commit_large_pages_when_reserving(size_t bytes) {
  48   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
  49     size_t words = bytes / BytesPerWord;
  50     bool is_class = false; // We never reserve large pages for the class space.
  51     if (MetaspaceGC::can_expand(words, is_class) &&
  52         MetaspaceGC::allowed_expansion() >= words) {
  53       return true;
  54     }
  55   }
  56 
  57   return false;
  58 }
  59 
   60 // bytes is the size of the associated virtual space.
  61 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
  62     _next(NULL), _is_class(is_class), _rs(), _top(NULL), _container_count(0), _occupancy_map(NULL) {
  63   assert_is_aligned(bytes, Metaspace::reserve_alignment());
  64   bool large_pages = should_commit_large_pages_when_reserving(bytes);
  65   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  66 
  67   if (_rs.is_reserved()) {
  68     assert(_rs.base() != NULL, "Catch if we get a NULL address");
  69     assert(_rs.size() != 0, "Catch if we get a 0 size");
  70     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
  71     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
  72 
  73     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  74   }
  75 }
  76 
  77 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
   78   // When a node is purged, let's give it a thorough examination.
  79   DEBUG_ONLY(verify(true);)
  80   Metachunk* chunk = first_chunk();
  81   Metachunk* invalid_chunk = (Metachunk*) top();
  82   while (chunk < invalid_chunk ) {
  83     assert(chunk->is_tagged_free(), "Should be tagged free");
  84     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
  85     chunk_manager->remove_chunk(chunk);
  86     chunk->remove_sentinel();
  87     assert(chunk->next() == NULL &&
  88         chunk->prev() == NULL,
  89         "Was not removed from its list");
  90     chunk = (Metachunk*) next;
  91   }
  92 }
  93 
  94 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
  95 
  96   if (bottom() == top()) {
  97     return;
  98   }
  99 
 100   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 101   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 102   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 103 
 104   int line_len = 100;
 105   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 106   line_len = (int)(section_len / spec_chunk_size);
 107 
 108   static const int NUM_LINES = 4;
 109 
 110   char* lines[NUM_LINES];
 111   for (int i = 0; i < NUM_LINES; i ++) {
 112     lines[i] = (char*)os::malloc(line_len, mtInternal);
 113   }
 114   int pos = 0;
 115   const MetaWord* p = bottom();
 116   const Metachunk* chunk = (const Metachunk*)p;
 117   const MetaWord* chunk_end = p + chunk->word_size();
 118   while (p < top()) {
 119     if (pos == line_len) {
 120       pos = 0;
 121       for (int i = 0; i < NUM_LINES; i ++) {
 122         st->fill_to(22);
 123         st->print_raw(lines[i], line_len);
 124         st->cr();
 125       }
 126     }
 127     if (pos == 0) {
 128       st->print(PTR_FORMAT ":", p2i(p));
 129     }
 130     if (p == chunk_end) {
 131       chunk = (Metachunk*)p;
 132       chunk_end = p + chunk->word_size();
 133     }
 134     // line 1: chunk starting points (a dot if that area is a chunk start).
 135     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
 136 
 137     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
 138     // chunk is in use.
 139     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
 140     if (chunk->word_size() == spec_chunk_size) {
 141       lines[1][pos] = chunk_is_free ? 'x' : 'X';
 142     } else if (chunk->word_size() == small_chunk_size) {
 143       lines[1][pos] = chunk_is_free ? 's' : 'S';
 144     } else if (chunk->word_size() == med_chunk_size) {
 145       lines[1][pos] = chunk_is_free ? 'm' : 'M';
 146     } else if (chunk->word_size() > med_chunk_size) {
 147       lines[1][pos] = chunk_is_free ? 'h' : 'H';
 148     } else {
 149       ShouldNotReachHere();
 150     }
 151 
 152     // Line 3: chunk origin
 153     const ChunkOrigin origin = chunk->get_origin();
 154     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
 155 
 156     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
 157     //         but were never used.
 158     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
 159 
 160     p += spec_chunk_size;
 161     pos ++;
 162   }
 163   if (pos > 0) {
 164     for (int i = 0; i < NUM_LINES; i ++) {
 165       st->fill_to(22);
 166       st->print_raw(lines[i], line_len);
 167       st->cr();
 168     }
 169   }
 170   for (int i = 0; i < NUM_LINES; i ++) {
 171     os::free(lines[i]);
 172   }
 173 }
 174 
 175 
 176 #ifdef ASSERT
 177 
 178 // Verify counters, all chunks in this list node and the occupancy map.
 179 void VirtualSpaceNode::verify(bool slow) {
 180   log_trace(gc, metaspace, freelist)("verifying %s virtual space node (%s).",
 181     (is_class() ? "class space" : "metaspace"), (slow ? "slow" : "quick"));
 182   // Fast mode: just verify chunk counters and basic geometry
 183   // Slow mode: verify chunks and occupancy map
 184   uintx num_in_use_chunks = 0;
 185   Metachunk* chunk = first_chunk();
 186   Metachunk* invalid_chunk = (Metachunk*) top();
 187 
 188   // Iterate the chunks in this node and verify each chunk.
 189   while (chunk < invalid_chunk ) {
 190     if (slow) {
 191       do_verify_chunk(chunk);
 192     }
 193     if (!chunk->is_tagged_free()) {
 194       num_in_use_chunks ++;
 195     }
 196     const size_t s = chunk->word_size();
 197     // Prevent endless loop on invalid chunk size.
 198     assert(is_valid_chunksize(is_class(), s), "Invalid chunk size: " SIZE_FORMAT ".", s);
 199     MetaWord* next = ((MetaWord*)chunk) + s;
 200     chunk = (Metachunk*) next;
 201   }
 202   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
 203       ", counter: " UINTX_FORMAT ".", num_in_use_chunks, _container_count);
 204   // Also verify the occupancy map.
 205   if (slow) {
 206     occupancy_map()->verify(bottom(), top());
 207   }
 208 }
 209 
 210 // Verify that all free chunks in this node are ideally merged
  211 // (there should not be multiple small chunks where a large chunk could exist).
 212 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
 213   Metachunk* chunk = first_chunk();
 214   Metachunk* invalid_chunk = (Metachunk*) top();
 215   // Shorthands.
 216   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
 217   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
 218   int num_free_chunks_since_last_med_boundary = -1;
 219   int num_free_chunks_since_last_small_boundary = -1;
 220   bool error = false;
 221   char err[256];
 222   while (!error && chunk < invalid_chunk ) {
 223     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
 224     // Reset the counter when encountering a non-free chunk.
 225     if (chunk->get_chunk_type() != HumongousIndex) {
 226       if (chunk->is_tagged_free()) {
 227         // Count successive free, non-humongous chunks.
 228         if (is_aligned(chunk, size_small)) {
 229           if (num_free_chunks_since_last_small_boundary > 0) {
 230             error = true;
 231             jio_snprintf(err, sizeof(err), "Missed chunk merge opportunity to merge a small chunk preceding " PTR_FORMAT ".", p2i(chunk));
 232           } else {
 233             num_free_chunks_since_last_small_boundary = 0;
 234           }
 235         } else if (num_free_chunks_since_last_small_boundary != -1) {
 236           num_free_chunks_since_last_small_boundary ++;
 237         }
 238         if (is_aligned(chunk, size_med)) {
 239           if (num_free_chunks_since_last_med_boundary > 0) {
 240             error = true;
 241             jio_snprintf(err, sizeof(err), "Missed chunk merge opportunity to merge a medium chunk preceding " PTR_FORMAT ".", p2i(chunk));
 242           } else {
 243             num_free_chunks_since_last_med_boundary = 0;
 244           }
 245         } else if (num_free_chunks_since_last_med_boundary != -1) {
 246           num_free_chunks_since_last_med_boundary ++;
 247         }
 248       } else {
 249         // Encountering a non-free chunk, reset counters.
 250         num_free_chunks_since_last_med_boundary = -1;
 251         num_free_chunks_since_last_small_boundary = -1;
 252       }
 253     } else {
 254       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
 255       num_free_chunks_since_last_med_boundary = -1;
 256       num_free_chunks_since_last_small_boundary = -1;
 257     }
 258 
 259     if (error) {
 260       print_map(tty, is_class());
 261       fatal("%s", err);
 262     }
 263 
 264     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 265     chunk = (Metachunk*) next;
 266   }
 267 }
 268 #endif // ASSERT
 269 
 270 void VirtualSpaceNode::inc_container_count() {
 271   assert_lock_strong(MetaspaceExpand_lock);
 272   _container_count++;
 273 }
 274 
 275 void VirtualSpaceNode::dec_container_count() {
 276   assert_lock_strong(MetaspaceExpand_lock);
 277   _container_count--;
 278 }
 279 
 280 VirtualSpaceNode::~VirtualSpaceNode() {
 281   _rs.release();
 282   if (_occupancy_map != NULL) {
 283     delete _occupancy_map;
 284   }
 285 #ifdef ASSERT
 286   size_t word_size = sizeof(*this) / BytesPerWord;
 287   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 288 #endif
 289 }
 290 
 291 size_t VirtualSpaceNode::used_words_in_vs() const {
 292   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 293 }
 294 
 295 // Space committed in the VirtualSpace
 296 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 297   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 298 }
 299 
 300 size_t VirtualSpaceNode::free_words_in_vs() const {
 301   return pointer_delta(end(), top(), sizeof(MetaWord));
 302 }
 303 
 304 // Given an address larger than top(), allocate padding chunks until top is at the given address.
 305 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
 306 
 307   assert(target_top > top(), "Sanity");
 308 
 309   // Padding chunks are added to the freelist.
 310   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(is_class());
 311 
 312   // shorthands
 313   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
 314   const size_t small_word_size = chunk_manager->small_chunk_word_size();
 315   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
 316 
 317   while (top() < target_top) {
 318 
 319     // We could make this coding more generic, but right now we only deal with two possible chunk sizes
 320     // for padding chunks, so it is not worth it.
 321     size_t padding_chunk_word_size = small_word_size;
 322     if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
 323       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
 324       padding_chunk_word_size = spec_word_size;
 325     }
 326     MetaWord* here = top();
 327     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
 328     inc_top(padding_chunk_word_size);
 329 
 330     // Create new padding chunk.
 331     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
 332     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
 333 
 334     Metachunk* const padding_chunk =
 335         ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
 336     assert(padding_chunk == (Metachunk*)here, "Sanity");
 337     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
 338     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
 339         PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
 340         (is_class() ? "class space " : "metaspace"),
 341         p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
 342 
 343     // Mark chunk start in occupancy map.
 344     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
 345 
 346     // Chunks are born as in-use (see MetaChunk ctor). So, before returning
 347     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
 348     // will assert that).
 349     do_update_in_use_info_for_chunk(padding_chunk, true);
 350 
 351     // Return Chunk to freelist.
 352     inc_container_count();
 353     chunk_manager->return_single_chunk(padding_chunk);
 354     // Please note: at this point, ChunkManager::return_single_chunk()
 355     // may already have merged the padding chunk with neighboring chunks, so
 356     // it may have vanished at this point. Do not reference the padding
 357     // chunk beyond this point.
 358   }
 359 
 360   assert(top() == target_top, "Sanity");
 361 
 362 } // allocate_padding_chunks_until_top_is_at()
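
For illustration only: the alignment walk above can be reduced to the following standalone sketch (hypothetical helper, not part of this file; it assumes spec_words divides small_words, top starts spec-aligned, and target_top is a multiple of small_words, which the callers above guarantee).

#include <cassert>
#include <cstddef>
#include <vector>

// Toy model of the padding walk: returns the padding chunk sizes (in words)
// needed to move 'top' up to 'target_top'.
static std::vector<size_t> padding_sizes(size_t top, size_t target_top,
                                         size_t spec_words, size_t small_words) {
  std::vector<size_t> result;
  while (top < target_top) {
    // Use a small chunk if top is already small-aligned, a spec chunk otherwise.
    const size_t sz = (top % small_words == 0) ? small_words : spec_words;
    assert(top % spec_words == 0); // top stays spec-aligned throughout
    result.push_back(sz);
    top += sz;
  }
  assert(top == target_top);
  return result;
}

E.g. with spec=128 and small=512 words, moving top from 640 to 1024 emits three spec-sized padding chunks; moving it from 512 to 1536 emits two small-sized ones.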
 363 
 364 // Allocates the chunk from the virtual space only.
 365 // This interface is also used internally for debugging.  Not all
 366 // chunks removed here are necessarily used for allocation.
 367 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 368   // Non-humongous chunks are to be allocated aligned to their chunk
 369   // size. So, start addresses of medium chunks are aligned to medium
 370   // chunk size, those of small chunks to small chunk size and so
 371   // forth. This facilitates merging of free chunks and reduces
 372   // fragmentation. Chunk sizes are spec < small < medium, with each
 373   // larger chunk size being a multiple of the next smaller chunk
 374   // size.
  375   // Because of this alignment, we may need to create a number of padding
 376   // chunks. These chunks are created and added to the freelist.
 377 
 378   // The chunk manager to which we will give our padding chunks.
 379   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(is_class());
 380 
 381   // shorthands
 382   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
 383   const size_t small_word_size = chunk_manager->small_chunk_word_size();
 384   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
 385 
 386   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
 387       chunk_word_size >= med_word_size, "Invalid chunk size requested.");
 388 
 389   // Chunk alignment (in bytes) == chunk size unless humongous.
 390   // Humongous chunks are aligned to the smallest chunk size (spec).
 391   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
 392       spec_word_size : chunk_word_size) * sizeof(MetaWord);
 393 
 394   // Do we have enough space to create the requested chunk plus
 395   // any padding chunks needed?
 396   MetaWord* const next_aligned =
 397       static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
 398   if (!is_available((next_aligned - top()) + chunk_word_size)) {
 399     return NULL;
 400   }
 401 
 402   // Before allocating the requested chunk, allocate padding chunks if necessary.
 403   // We only need to do this for small or medium chunks: specialized chunks are the
  404   // smallest size, hence always aligned. Humongous chunks are allocated unaligned
 405   // (implicitly, also aligned to smallest chunk size).
 406   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
 407     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
 408         (is_class() ? "class space " : "metaspace"),
 409         top(), next_aligned);
 410     allocate_padding_chunks_until_top_is_at(next_aligned);
 411     // Now, top should be aligned correctly.
 412     assert_is_aligned(top(), required_chunk_alignment);
 413   }
 414 
 415   // Now, top should be aligned correctly.
 416   assert_is_aligned(top(), required_chunk_alignment);
 417 
 418   // Bottom of the new chunk
 419   MetaWord* chunk_limit = top();
 420   assert(chunk_limit != NULL, "Not safe to call this method");
 421 
 422   // The virtual spaces are always expanded by the
 423   // commit granularity to enforce the following condition.
 424   // Without this the is_available check will not work correctly.
 425   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
 426       "The committed memory doesn't match the expanded memory.");
 427 
 428   if (!is_available(chunk_word_size)) {
 429     LogTarget(Trace, gc, metaspace, freelist) lt;
 430     if (lt.is_enabled()) {
 431       LogStream ls(lt);
 432       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
 433       // Dump some information about the virtual space that is nearly full
 434       print_on(&ls);
 435     }
 436     return NULL;
 437   }
 438 
  439   // Take the space (bump top on the current virtual space).
 440   inc_top(chunk_word_size);
 441 
 442   // Initialize the chunk
 443   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
 444   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
 445   assert(result == (Metachunk*)chunk_limit, "Sanity");
 446   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
 447   do_update_in_use_info_for_chunk(result, true);
 448 
 449   inc_container_count();
 450 
 451 #ifdef ASSERT
 452   EVERY_NTH(VerifyMetaspaceInterval)
 453     chunk_manager->locked_verify(true);
 454     verify(true);
 455   END_EVERY_NTH
 456   do_verify_chunk(result);
 457 #endif
 458 
 459   result->inc_use_count();
 460 
 461   return result;
 462 }
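
As a reader's aid, the alignment rule used above (chunk alignment equals chunk size, except for humongous chunks, which only align to the smallest chunk size) condenses to this hedged sketch; the helper name is illustrative, not from this file.

#include <cstddef>

// Required start alignment for a chunk of chunk_words words, in words.
// Sizes satisfy spec < small < medium; anything above medium is humongous.
static size_t required_alignment_words(size_t chunk_words, size_t spec_words,
                                       size_t med_words) {
  return (chunk_words > med_words) ? spec_words : chunk_words;
}

E.g. with spec=128 and med=8192 words, a medium request aligns to 8192 words while a humongous request of 10000 words aligns only to 128 words.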
 463 
 464 
 465 // Expand the virtual space (commit more of the reserved space)
 466 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
 467   size_t min_bytes = min_words * BytesPerWord;
 468   size_t preferred_bytes = preferred_words * BytesPerWord;
 469 
 470   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
 471 
 472   if (uncommitted < min_bytes) {
 473     return false;
 474   }
 475 
 476   size_t commit = MIN2(preferred_bytes, uncommitted);
 477   bool result = virtual_space()->expand_by(commit, false);
 478 
 479   if (result) {
 480     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
 481         (is_class() ? "class" : "non-class"), commit);
 482     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
 483   } else {
 484     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
 485         (is_class() ? "class" : "non-class"), commit);
 486   }
 487 
 488   assert(result, "Failed to commit memory");
 489 
 490   return result;
 491 }
 492 
 493 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
 494   assert_lock_strong(MetaspaceExpand_lock);
 495   Metachunk* result = take_from_committed(chunk_word_size);
 496   return result;
 497 }
 498 
 499 bool VirtualSpaceNode::initialize() {
 500 
 501   if (!_rs.is_reserved()) {
 502     return false;
 503   }
 504 
  505   // These are necessary restrictions to make sure that the virtual space always
 506   // grows in steps of Metaspace::commit_alignment(). If both base and size are
 507   // aligned only the middle alignment of the VirtualSpace is used.
 508   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
 509   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
 510 
 511   // ReservedSpaces marked as special will have the entire memory
 512   // pre-committed. Setting a committed size will make sure that
  513   // committed_size and actual_committed_size agree.
 514   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
 515 
 516   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
 517       Metaspace::commit_alignment());
 518   if (result) {
 519     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
 520         "Checking that the pre-committed memory was registered by the VirtualSpace");
 521 
 522     set_top((MetaWord*)virtual_space()->low());
 523   }
 524 
 525   // Initialize Occupancy Map.
 526   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
 527   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
 528 
 529   return result;
 530 }
 531 
 532 void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
 533   size_t used_words = used_words_in_vs();
 534   size_t commit_words = committed_words();
 535   size_t res_words = reserved_words();
 536   VirtualSpace* vs = virtual_space();
 537 
 538   st->print("node @" PTR_FORMAT ": ", p2i(this));
 539   st->print("reserved=");
 540   print_scaled_words(st, res_words, scale);
 541   st->print(", committed=");
 542   print_scaled_words_and_percentage(st, commit_words, res_words, scale);
 543   st->print(", used=");
 544   print_scaled_words_and_percentage(st, used_words, res_words, scale);
 545   st->cr();
 546   st->print("   [" PTR_FORMAT ", " PTR_FORMAT ", "
 547       PTR_FORMAT ", " PTR_FORMAT ")",
 548       p2i(bottom()), p2i(top()), p2i(end()),
 549       p2i(vs->high_boundary()));
 550 }
 551 
 552 #ifdef ASSERT
 553 void VirtualSpaceNode::mangle() {
 554   size_t word_size = capacity_words_in_vs();
 555   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
 556 }
 557 #endif // ASSERT
 558 
 559 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
 560   assert(is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
 561 #ifdef ASSERT
 562   verify(false);
 563   EVERY_NTH(VerifyMetaspaceInterval)
 564     verify(true);
 565   END_EVERY_NTH
 566 #endif
 567   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
 568     ChunkIndex index = (ChunkIndex)i;
 569     size_t chunk_size = chunk_manager->size_by_index(index);
 570 
 571     while (free_words_in_vs() >= chunk_size) {
 572       Metachunk* chunk = get_chunk_vs(chunk_size);
 573       // Chunk will be allocated aligned, so allocation may require
 574       // additional padding chunks. That may cause above allocation to
 575       // fail. Just ignore the failed allocation and continue with the
  576       // next smaller chunk size. As the VirtualSpaceNode committed
 577       // size should be a multiple of the smallest chunk size, we
 578       // should always be able to fill the VirtualSpace completely.
 579       if (chunk == NULL) {
 580         break;
 581       }
 582       chunk_manager->return_single_chunk(chunk);
 583     }
 584   }
 585   assert(free_words_in_vs() == 0, "should be empty now");
 586 }
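
The loop above amounts to a largest-fit-first carve-up of the node's remaining free space. A toy sketch of that idea (illustrative only; it ignores the padding-related allocation failures the comment mentions):

#include <cstddef>
#include <vector>

// Carve free_words into chunks, trying the largest chunk size first.
// sizes_desc must be sorted in descending order.
static std::vector<size_t> carve(size_t free_words,
                                 const std::vector<size_t>& sizes_desc) {
  std::vector<size_t> chunks;
  for (size_t s : sizes_desc) {
    while (free_words >= s) {
      chunks.push_back(s);
      free_words -= s;
    }
  }
  return chunks;
}

For example, carve(10, {8, 2, 1}) yields one 8-word and one 2-word chunk, leaving nothing behind.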
 587 
 588 } // namespace metaspace
   1 /*
   2  * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2018, 2020 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "logging/log.hpp"
  29 
  30 #include "memory/metaspace/chunkLevel.hpp"
  31 #include "memory/metaspace/chunkHeaderPool.hpp"
  32 #include "memory/metaspace/commitLimiter.hpp"
  33 #include "memory/metaspace/counter.hpp"
  34 #include "memory/metaspace/freeChunkList.hpp"
  35 #include "memory/metaspace/internStat.hpp"
  36 #include "memory/metaspace/metachunk.hpp"
  37 #include "memory/metaspace/metaspaceCommon.hpp"
  38 #include "memory/metaspace/rootChunkArea.hpp"
  39 #include "memory/metaspace/runningCounters.hpp"
  40 #include "memory/metaspace/settings.hpp"
  41 #include "memory/metaspace/virtualSpaceNode.hpp"
  42 #include "memory/metaspace.hpp"
  43 
  44 #include "runtime/globals.hpp"
  45 #include "runtime/mutexLocker.hpp"
  46 #include "runtime/os.hpp"
  47 
  48 #include "utilities/align.hpp"
  49 #include "utilities/debug.hpp"
  50 #include "utilities/globalDefinitions.hpp"
  51 #include "utilities/ostream.hpp"
  52 
  53 namespace metaspace {
  54 
  55 #define LOGFMT         "VsListNode @" PTR_FORMAT " base " PTR_FORMAT " "
  56 #define LOGFMT_ARGS    p2i(this), p2i(_base)
  57 
  58 #ifdef ASSERT
  59 void check_pointer_is_aligned_to_commit_granule(const MetaWord* p) {
  60   assert(is_aligned(p, Settings::commit_granule_bytes()),
  61          "Pointer not aligned to commit granule size: " PTR_FORMAT ".",
  62          p2i(p));
  63 }
  64 void check_word_size_is_aligned_to_commit_granule(size_t word_size) {
  65   assert(is_aligned(word_size, Settings::commit_granule_words()),
  66          "Not aligned to commit granule size: " SIZE_FORMAT ".", word_size);
  67 }
  68 #endif
  69 
  70 
  71 // Given an address range, ensure it is committed.
  72 //
  73 // The range has to be aligned to granule size.
  74 //
  75 // Function will:
   76 // - check how many granules in that region are uncommitted; if all are committed, it
   77 //    returns true immediately.
   78 // - check if committing those uncommitted granules would bring us over the commit limit
   79 //    (GC threshold, MaxMetaspaceSize). If so, it returns false.
  80 // - commit the memory.
  81 // - mark the range as committed in the commit mask
  82 //
  83 // Returns true if success, false if it did hit a commit limit.
  84 bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {
  85 
  86   DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
  87   DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
  88   assert_lock_strong(MetaspaceExpand_lock);
  89 
  90   // First calculate how large the committed regions in this range are
  91   const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  92   DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)
  93 
  94   // By how much words we would increase commit charge
  95   //  were we to commit the given address range completely.
  96   const size_t commit_increase_words = word_size - committed_words_in_range;
  97 
  98   UL2(debug, "committing range " PTR_FORMAT ".." PTR_FORMAT "(" SIZE_FORMAT " words)",
  99       p2i(p), p2i(p + word_size), word_size);
 100 
 101   if (commit_increase_words == 0) {
 102     UL(debug, "... already fully committed.");
 103     return true; // Already fully committed, nothing to do.
 104   }
 105 
 106   // Before committing any more memory, check limits.
 107   if (_commit_limiter->possible_expansion_words() < commit_increase_words) {
 108     UL(debug, "... cannot commit (limit).");
 109     return false;
 110   }
 111 
 112   // Commit...
 113   if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) {
 114     vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace.");
 115   }
 116 
 117   if (AlwaysPreTouch) {
 118     os::pretouch_memory(p, p + word_size);
 119   }
 120 
 121   UL2(debug, "... committed " SIZE_FORMAT " additional words.", commit_increase_words);
 122 
 123   // ... tell commit limiter...
 124   _commit_limiter->increase_committed(commit_increase_words);
 125 
 126   // ... update counters in containing vslist ...
 127   _total_committed_words_counter->increment_by(commit_increase_words);
 128 
 129   // ... and update the commit mask.
 130   _commit_mask.mark_range_as_committed(p, word_size);
 131 
 132 #ifdef ASSERT
  133   // The commit boundary maintained in the CommitLimiter should equal the sum of committed words
 134   // in both class and non-class vslist (outside gtests).
 135   if (_commit_limiter == CommitLimiter::globalLimiter()) {
 136     assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
 137   }
 138 #endif
 139 
 140   InternalStats::inc_num_space_committed();
 141 
 142   return true;
 143 
 144 }
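
The accounting above hinges on the commit mask tracking, per commit granule, whether that granule is committed; only the delta is charged to the limiter. A minimal stand-in for that bookkeeping (hypothetical ToyCommitMask; the real CommitMask class lives elsewhere in this patch):

#include <cstddef>
#include <vector>

struct ToyCommitMask {
  size_t granule_words;          // words per commit granule
  std::vector<bool> committed;   // one flag per granule

  // Committed size, in words, within granules [first, first + n).
  size_t committed_words_in_range(size_t first, size_t n) const {
    size_t hits = 0;
    for (size_t i = first; i < first + n; i++) {
      if (committed[i]) {
        hits++;
      }
    }
    return hits * granule_words;
  }
};

commit_range() then charges only word_size minus the already-committed portion against the limiter, since re-committing already-committed granules adds no charge.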
 145 
 146 // Given an address range, ensure it is committed.
 147 //
 148 // The range does not have to be aligned to granule size. However, the function will always commit
 149 // whole granules.
 150 //
 151 // Function will:
  152 // - check how many granules in that region are uncommitted; if all are committed, it
  153 //    returns true immediately.
  154 // - check if committing those uncommitted granules would bring us over the commit limit
  155 //    (GC threshold, MaxMetaspaceSize). If so, it returns false.
 156 // - commit the memory.
 157 // - mark the range as committed in the commit mask
 158 //
 159 // !! Careful:
 160 //    calling ensure_range_is_committed on a range which contains both committed and uncommitted
  161 //    areas will commit the whole area, thus erasing the content in the existing committed parts.
 162 //    Make sure you never call this on an address range containing live data. !!
 163 //
 164 // Returns true if success, false if it did hit a commit limit.
 165 bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) {
 166 
 167   assert_lock_strong(MetaspaceExpand_lock);
 168   assert(p != NULL && word_size > 0, "Sanity");
 169 
 170   MetaWord* p_start = align_down(p, Settings::commit_granule_bytes());
 171   MetaWord* p_end = align_up(p + word_size, Settings::commit_granule_bytes());
 172 
  173   // Todo: simple for now. Make it more intelligent later.
 174   return commit_range(p_start, p_end - p_start);
 175 
 176 }
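
The two alignment calls above widen an arbitrary range to whole granules. A sketch of that arithmetic with an assumed 64 KB granule (the authoritative value comes from Settings::commit_granule_bytes()):

#include <cstddef>
#include <cstdint>

const uintptr_t kGranuleBytes = 64 * 1024; // assumed for illustration

static uintptr_t granule_start(uintptr_t p) {             // align_down
  return p & ~(kGranuleBytes - 1);
}
static uintptr_t granule_end(uintptr_t p, size_t bytes) { // align_up
  return (p + bytes + kGranuleBytes - 1) & ~(kGranuleBytes - 1);
}

E.g. committing 100 bytes starting at 0x10010 commits the whole granule range [0x10000, 0x20000).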
 177 
 178 // Given an address range (which has to be aligned to commit granule size):
 179 //  - uncommit it
 180 //  - mark it as uncommitted in the commit mask
 181 void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {
 182 
 183   DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
 184   DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
 185   assert_lock_strong(MetaspaceExpand_lock);
 186 
 187   // First calculate how large the committed regions in this range are
 188   const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
 189   DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)
 190 
 191   UL2(debug, "uncommitting range " PTR_FORMAT ".." PTR_FORMAT "(" SIZE_FORMAT " words)",
 192       p2i(p), p2i(p + word_size), word_size);
 193 
 194   if (committed_words_in_range == 0) {
 195     UL(debug, "... already fully uncommitted.");
 196     return; // Already fully uncommitted, nothing to do.
 197   }
 198 
 199   // Uncommit...
 200   if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
 201     // Note: this can actually happen, since uncommit may increase the number of mappings.
 202     fatal("Failed to uncommit metaspace.");
 203   }
 204 
 205   UL2(debug, "... uncommitted " SIZE_FORMAT " words.", committed_words_in_range);
 206 
 207   // ... tell commit limiter...
 208   _commit_limiter->decrease_committed(committed_words_in_range);
 209 
 210   // ... and global counters...
 211   _total_committed_words_counter->decrement_by(committed_words_in_range);
 212 
 213    // ... and update the commit mask.
 214   _commit_mask.mark_range_as_uncommitted(p, word_size);
 215 
 216 #ifdef ASSERT
  217   // The commit boundary maintained in the CommitLimiter should equal the sum of committed words
 218   // in both class and non-class vslist (outside gtests).
 219   if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario
 220     assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
 221   }
 222 #endif
 223 
 224   InternalStats::inc_num_space_uncommitted();
 225 
 226 }
 227 
 228 //// creation, destruction ////
 229 
 230 VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs, bool owns_rs, CommitLimiter* limiter,
 231                                    SizeCounter* reserve_counter, SizeCounter* commit_counter)
 232   : _next(NULL),
 233     _rs(rs),
 234     _owns_rs(owns_rs),
 235     _base((MetaWord*)rs.base()),
 236     _word_size(rs.size() / BytesPerWord),
 237     _used_words(0),
 238     _commit_mask((MetaWord*)rs.base(), rs.size() / BytesPerWord),
 239     _root_chunk_area_lut((MetaWord*)rs.base(), rs.size() / BytesPerWord),
 240     _commit_limiter(limiter),
 241     _total_reserved_words_counter(reserve_counter),
 242     _total_committed_words_counter(commit_counter)
 243 {
 244   UL2(debug, "born (word_size " SIZE_FORMAT ").", _word_size);
 245 
 246   // Update reserved counter in vslist
 247   _total_reserved_words_counter->increment_by(_word_size);
 248 
 249   assert_is_aligned(_base, chunklevel::MAX_CHUNK_BYTE_SIZE);
 250   assert_is_aligned(_word_size, chunklevel::MAX_CHUNK_WORD_SIZE);
 251 
 252 }
 253 
 254 
 255 // Create a node of a given size (it will create its own space).
 256 VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
 257                                                 CommitLimiter* limiter, SizeCounter* reserve_words_counter,
 258                                                 SizeCounter* commit_words_counter)
 259 {
 260 
 261   DEBUG_ONLY(assert_is_aligned(word_size, chunklevel::MAX_CHUNK_WORD_SIZE);)
 262 
 263   ReservedSpace rs(word_size * BytesPerWord,
 264                    Settings::virtual_space_node_reserve_alignment_words() * BytesPerWord,
 265                    false // large
 266                    );
 267 
 268   if (!rs.is_reserved()) {
 269     vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
 270   }
 271 
 272   assert_is_aligned(rs.base(), chunklevel::MAX_CHUNK_BYTE_SIZE);
 273 
 274   InternalStats::inc_num_vsnodes_births();
 275   return new VirtualSpaceNode(rs, true, limiter, reserve_words_counter, commit_words_counter);
 276 
 277 }
 278 
 279 // Create a node over an existing space
 280 VirtualSpaceNode* VirtualSpaceNode::create_node(ReservedSpace rs, CommitLimiter* limiter,
 281                                                 SizeCounter* reserve_words_counter, SizeCounter* commit_words_counter)
 282 {
 283   InternalStats::inc_num_vsnodes_births();
 284   return new VirtualSpaceNode(rs, false, limiter, reserve_words_counter, commit_words_counter);
 285 }
 286 
 287 VirtualSpaceNode::~VirtualSpaceNode() {
 288 
 289   DEBUG_ONLY(verify_locked(true);)
 290 
 291   UL(debug, ": dies.");
 292 
 293   if (_owns_rs) {
 294     _rs.release();
 295   }
 296 
 297   // Update counters in vslist
 298   size_t committed = committed_words();
 299   _total_committed_words_counter->decrement_by(committed);
 300   _total_reserved_words_counter->decrement_by(_word_size);
 301 
 302   // ... and tell commit limiter
 303   _commit_limiter->decrease_committed(committed);
 304 
 305   InternalStats::inc_num_vsnodes_deaths();
 306 
 307 }
 308 
 309 //// Chunk allocation, splitting, merging /////
 310 
  311 // Allocate a root chunk from this node. Will fail and return NULL if the node is full,
  312 //  i.e. if we have used up the whole address space of this node's memory region
  313 //  (in case this node backs compressed class space, this is how we hit
  314 //   CompressedClassSpaceSize).
 315 // Note that this just returns reserved memory; caller must take care of committing this
 316 //  chunk before using it.
 317 Metachunk* VirtualSpaceNode::allocate_root_chunk() {
 318 
 319   assert_lock_strong(MetaspaceExpand_lock);
 320 
 321   assert_is_aligned(free_words(), chunklevel::MAX_CHUNK_WORD_SIZE);
 322 
 323   if (free_words() >= chunklevel::MAX_CHUNK_WORD_SIZE) {
 324 
 325     MetaWord* loc = _base + _used_words;
 326     _used_words += chunklevel::MAX_CHUNK_WORD_SIZE;
 327 
 328     RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(loc);
 329 
  330     // Create a root chunk header and initialize it.
 331     Metachunk* c = rca->alloc_root_chunk_header(this);
 332 
 333     assert(c->base() == loc && c->vsnode() == this &&
 334            c->is_free(), "Sanity");
 335 
 336     DEBUG_ONLY(c->verify(true);)
 337 
 338     UL2(debug, "new root chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
 339 
 340     return c;
 341 
 342   }
 343 
 344   return NULL; // Node is full.
 345 
 346 }
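
Because the chunk comes back reserved-only, a caller must commit memory before touching it. A hedged usage sketch (assuming a Metachunk::ensure_committed-style helper; the exact caller-side API may differ):

// Metachunk* c = node->allocate_root_chunk();
// if (c != NULL) {
//   // Commit the first part of the chunk before handing it out; this can
//   // fail against the commit limit (GC threshold, MaxMetaspaceSize).
//   if (!c->ensure_committed(initial_word_size)) {
//     // ... handle commit failure ...
//   }
// }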
 347 
 348 // Given a chunk c, split it recursively until you get a chunk of the given target_level.
 349 //
 350 // The resulting target chunk resides at the same address as the original chunk.
 351 // The resulting splinters are added to freelists.
 352 void VirtualSpaceNode::split(chunklevel_t target_level, Metachunk* c, FreeChunkListVector* freelists) {
 353 
 354   assert_lock_strong(MetaspaceExpand_lock);
 355 
 356   // Get the area associated with this chunk and let it handle the splitting
 357   RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());
 358 
 359   DEBUG_ONLY(rca->verify_area_is_ideally_merged();)
 360 
 361   rca->split(target_level, c, freelists);
 362 
 363 }
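
Conceptually this is a buddy-style split: each step halves the leader chunk, raising its level by one (higher level means smaller chunk), and frees the trailing half as a splinter. A toy sketch of the recursion, with an illustrative callback standing in for the real freelist API:

#include <functional>

// Split a chunk at 'level' down to 'target_level' (> level). The leader
// stays at the original address; one splinter is freed per level passed.
static void toy_split(int level, int target_level,
                      const std::function<void(int)>& free_splinter) {
  while (level < target_level) {
    level++;              // halve the leader chunk ...
    free_splinter(level); // ... its upper half becomes a free splinter
  }
}

Splitting a root chunk (level 0) down to level 3 thus leaves splinters at levels 1, 2 and 3 on the freelists.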
 364 
 365 // Given a chunk, attempt to merge it recursively with its neighboring chunks.
 366 //
 367 // If successful (merged at least once), returns address of
 368 // the merged chunk; NULL otherwise.
 369 //
 370 // The merged chunks are removed from the freelists.
 371 //
 372 // !!! Please note that if this method returns a non-NULL value, the
 373 // original chunk will be invalid and should not be accessed anymore! !!!
 374 Metachunk* VirtualSpaceNode::merge(Metachunk* c, FreeChunkListVector* freelists) {
 375 
 376   assert(c != NULL && c->is_free(), "Sanity");
 377   assert_lock_strong(MetaspaceExpand_lock);
 378 
 379   // Get the rca associated with this chunk and let it handle the merging
 380   RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());
 381 
 382   Metachunk* c2 = rca->merge(c, freelists);
 383 
 384   DEBUG_ONLY(rca->verify_area_is_ideally_merged();)
 385 
 386   return c2;
 387 
 388 }
 389 
 390 // Given a chunk c, which must be "in use" and must not be a root chunk, attempt to
 391 // enlarge it in place by claiming its trailing buddy.
 392 //
 393 // This will only work if c is the leader of the buddy pair and the trailing buddy is free.
 394 //
 395 // If successful, the follower chunk will be removed from the freelists, the leader chunk c will
 396 // double in size (level decreased by one).
 397 //
 398 // On success, true is returned, false otherwise.
 399 bool VirtualSpaceNode::attempt_enlarge_chunk(Metachunk* c, FreeChunkListVector* freelists) {
 400 
 401   assert(c != NULL && c->is_in_use() && !c->is_root_chunk(), "Sanity");
 402   assert_lock_strong(MetaspaceExpand_lock);
 403 
 404   // Get the rca associated with this chunk and let it handle the merging
 405   RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());
 406 
 407   bool rc = rca->attempt_enlarge_chunk(c, freelists);
 408 
 409   DEBUG_ONLY(rca->verify_area_is_ideally_merged();)
 410 
 411   if (rc) {
 412     InternalStats::inc_num_chunks_enlarged();
 413   }
 414 
 415   return rc;
 416 
 417 }
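
The leader condition above is the classic buddy criterion: a chunk can only absorb its trailing buddy if it is the first of the pair, which holds exactly when its offset within the root chunk area is aligned to twice its own size. As a standalone sketch (illustrative, not this file's API):

#include <cstddef>
#include <cstdint>

// True if a chunk at byte offset 'offset' (from the start of its buddy
// region) with size 'chunk_bytes' (a power of two) leads its buddy pair.
static bool is_buddy_leader(uintptr_t offset, size_t chunk_bytes) {
  return (offset & (2 * chunk_bytes - 1)) == 0;
}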
 418 
 419 // Attempts to purge the node:
 420 //
 421 // If all chunks living in this node are free, they will all be removed from
 422 //  the freelist they currently reside in. Then, the node will be deleted.
 423 //
 424 // Returns true if the node has been deleted, false if not.
 425 // !! If this returns true, do not access the node from this point on. !!
 426 bool VirtualSpaceNode::attempt_purge(FreeChunkListVector* freelists) {
 427 
 428   assert_lock_strong(MetaspaceExpand_lock);
 429 
 430   if (!_owns_rs) {
 431     // We do not allow purging of nodes if we do not own the
  432   // underlying ReservedSpace (compressed class space case).
 433     return false;
 434   }
 435 
 436   // First find out if all areas are empty. Since empty chunks collapse to root chunk
 437   // size, if all chunks in this node are free root chunks we are good to go.
 438   if (!_root_chunk_area_lut.is_free()) {
 439     return false;
 440   }
 441 
 442   UL(debug, ": purging.");
 443 
 444   // Okay, we can purge. Before we can do this, we need to remove all chunks from the freelist.
 445   for (int narea = 0; narea < _root_chunk_area_lut.number_of_areas(); narea ++) {
 446     RootChunkArea* ra = _root_chunk_area_lut.get_area_by_index(narea);
 447     Metachunk* c = ra->first_chunk();
 448     if (c != NULL) {
 449       UL2(trace, "removing chunk from to-be-purged node: "
 450           METACHUNK_FULL_FORMAT ".", METACHUNK_FULL_FORMAT_ARGS(c));
 451       assert(c->is_free() && c->is_root_chunk(), "Sanity");
 452       freelists->remove(c);
 453     }
 454   }
 455 
 456   // Now, delete the node, then right away return since this object is invalid.
 457   delete this;
 458 
 459   return true;
 460 
 461 }
 462 
 463 
 464 void VirtualSpaceNode::print_on(outputStream* st) const {
 465 
 466   size_t scale = K;
 467 
 468   st->print("base " PTR_FORMAT ": ", p2i(base()));
 469   st->print("reserved=");
 470   print_scaled_words(st, word_size(), scale);
 471   st->print(", committed=");
 472   print_scaled_words_and_percentage(st, committed_words(), word_size(), scale);
 473   st->print(", used=");
 474   print_scaled_words_and_percentage(st, used_words(), word_size(), scale);
 475 
 476   st->cr();
 477   _root_chunk_area_lut.print_on(st);
 478   _commit_mask.print_on(st);
 479 
 480 }
 481 
 482 // Returns size, in words, of committed space in this node alone.
 483 // Note: iterates over commit mask and hence may be a tad expensive on large nodes.
 484 size_t VirtualSpaceNode::committed_words() const {
 485   return _commit_mask.get_committed_size();
 486 }
 487 
 488 #ifdef ASSERT
 489 void VirtualSpaceNode::verify(bool slow) const {
 490   MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
 491   verify_locked(slow);
 492 }
 493 
 494 // Verify counters and basic structure. Slow mode: verify all chunks in depth
 495 void VirtualSpaceNode::verify_locked(bool slow) const {
 496 
 497   assert_lock_strong(MetaspaceExpand_lock);
 498 
 499   assert(base() != NULL, "Invalid base");
 500   assert(base() == (MetaWord*)_rs.base() &&
 501          word_size() == _rs.size() / BytesPerWord,
 502          "Sanity");
 503   assert_is_aligned(base(), chunklevel::MAX_CHUNK_BYTE_SIZE);
 504   assert(used_words() <= word_size(), "Sanity");
 505 
 506   // Since we only ever hand out root chunks from a vsnode, top should always be aligned
 507   // to root chunk size.
 508   assert_is_aligned(used_words(), chunklevel::MAX_CHUNK_WORD_SIZE);
 509 
 510   _commit_mask.verify(slow);
 511   assert(committed_words() <= word_size(), "Sanity");
 512   assert_is_aligned(committed_words(), Settings::commit_granule_words());
 513   _root_chunk_area_lut.verify(slow);
 514 
 515 }
 516 
 517 #endif
 518 
 519 
 520 } // namespace metaspace