/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"

#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/occupancyMap.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {
namespace internals {

// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// bytes is the size of the associated virtual space.
VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
    _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
  assert_is_aligned(bytes, Metaspace::reserve_alignment());
  bool large_pages = should_commit_large_pages_when_reserving(bytes);
  _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  DEBUG_ONLY(this->verify();)
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    chunk->remove_sentinel();
    assert(chunk->next() == NULL &&
        chunk->prev() == NULL,
        "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {

  if (bottom() == top()) {
    return;
  }

  const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
  const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
  const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
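
  // Illustration only (hypothetical output, assuming non-class sizes where one
  // small chunk covers four specialized-chunk-sized slots): an in-use small
  // chunk followed by two free, never-used specialized chunks and one in-use
  // specialized chunk would print roughly as
  //
  //   0x00000007c0000000: .   ...     <- line 1: chunk start marks
  //                       SSSSxxX     <- line 2: chunk type, uppercase = in use
  //                                   <- line 3: origin (blank = origin_normal)
  //                           vv      <- line 4: 'v' = virgin (never used) chunk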

  int line_len = 100;
  const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
  line_len = (int)(section_len / spec_chunk_size);
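
  // For example (a sketch, assuming default non-class sizes of 128 words per
  // specialized and 8192 words per medium chunk): 128 * 100 = 12800 words,
  // aligned up to the medium chunk size, gives a section of 16384 words, so
  // line_len becomes 16384 / 128 = 128 map positions per printed line.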

  static const int NUM_LINES = 4;

  char* lines[NUM_LINES];
  for (int i = 0; i < NUM_LINES; i ++) {
    lines[i] = (char*)os::malloc(line_len, mtInternal);
  }
  int pos = 0;
  const MetaWord* p = bottom();
  const Metachunk* chunk = (const Metachunk*)p;
  const MetaWord* chunk_end = p + chunk->word_size();
  while (p < top()) {
    if (pos == line_len) {
      pos = 0;
      for (int i = 0; i < NUM_LINES; i ++) {
        st->fill_to(22);
        st->print_raw(lines[i], line_len);
        st->cr();
      }
    }
    if (pos == 0) {
      st->print(PTR_FORMAT ":", p2i(p));
    }
    if (p == chunk_end) {
      chunk = (Metachunk*)p;
      chunk_end = p + chunk->word_size();
    }
    // Line 1: chunk starting points (a dot if that area is a chunk start).
    lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';

    // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
    // chunk is in use.
    const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
    if (chunk->word_size() == spec_chunk_size) {
      lines[1][pos] = chunk_is_free ? 'x' : 'X';
    } else if (chunk->word_size() == small_chunk_size) {
      lines[1][pos] = chunk_is_free ? 's' : 'S';
    } else if (chunk->word_size() == med_chunk_size) {
      lines[1][pos] = chunk_is_free ? 'm' : 'M';
    } else if (chunk->word_size() > med_chunk_size) {
      lines[1][pos] = chunk_is_free ? 'h' : 'H';
    } else {
      ShouldNotReachHere();
    }

    // Line 3: chunk origin
    const ChunkOrigin origin = chunk->get_origin();
    lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;

    // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
    //         but were never used.
    lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';

    p += spec_chunk_size;
    pos ++;
  }
  if (pos > 0) {
    for (int i = 0; i < NUM_LINES; i ++) {
      st->fill_to(22);
      st->print_raw(lines[i], line_len);
      st->cr();
    }
  }
  for (int i = 0; i < NUM_LINES; i ++) {
    os::free(lines[i]);
  }
}


#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    do_verify_chunk(chunk);
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

#ifdef ASSERT
// Verify counters, all chunks in this list node and the occupancy map.
void VirtualSpaceNode::verify() {
  uintx num_in_use_chunks = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();

  // Iterate the chunks in this node and verify each chunk.
  while (chunk < invalid_chunk ) {
    DEBUG_ONLY(do_verify_chunk(chunk);)
    if (!chunk->is_tagged_free()) {
      num_in_use_chunks ++;
    }
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk = (Metachunk*) next;
  }
  assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
      ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
  // Also verify the occupancy map.
  occupancy_map()->verify(this->bottom(), this->top());
}
#endif // ASSERT

#ifdef ASSERT
// Verify that all free chunks in this node are ideally merged
// (there should not be multiple small chunks where a large chunk could exist.)
void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  // Shorthands.
  const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
  const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
  int num_free_chunks_since_last_med_boundary = -1;
  int num_free_chunks_since_last_small_boundary = -1;
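
  // Example of what this check catches (a sketch, assuming non-class sizes
  // where one small chunk equals four specialized chunks): if every chunk
  // between two adjacent small-chunk-aligned boundaries is free, those chunks
  // should already have been merged into a single free small chunk; finding
  // two or more free chunks in that range is a missed merge opportunity.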
  while (chunk < invalid_chunk ) {
    // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
    // Reset the counter when encountering a non-free chunk.
    if (chunk->get_chunk_type() != HumongousIndex) {
      if (chunk->is_tagged_free()) {
        // Count successive free, non-humongous chunks.
        if (is_aligned(chunk, size_small)) {
          assert(num_free_chunks_since_last_small_boundary <= 1,
              "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
          num_free_chunks_since_last_small_boundary = 0;
        } else if (num_free_chunks_since_last_small_boundary != -1) {
          num_free_chunks_since_last_small_boundary ++;
        }
        if (is_aligned(chunk, size_med)) {
          assert(num_free_chunks_since_last_med_boundary <= 1,
              "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
          num_free_chunks_since_last_med_boundary = 0;
        } else if (num_free_chunks_since_last_med_boundary != -1) {
          num_free_chunks_since_last_med_boundary ++;
        }
      } else {
        // Encountering a non-free chunk, reset counters.
        num_free_chunks_since_last_med_boundary = -1;
        num_free_chunks_since_last_small_boundary = -1;
      }
    } else {
      // One cannot merge areas with a humongous chunk in the middle. Reset counters.
      num_free_chunks_since_last_med_boundary = -1;
      num_free_chunks_since_last_small_boundary = -1;
    }

    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk = (Metachunk*) next;
  }
}
#endif // ASSERT

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _container_count++;
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
      "Inconsistency in container_count _container_count " UINTX_FORMAT
      " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
}
#endif

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
  if (_occupancy_map != NULL) {
    delete _occupancy_map;
  }
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Given an address larger than top(), allocate padding chunks until top is at the given address.
void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {

  assert(target_top > top(), "Sanity");

  // Padding chunks are added to the freelist.
  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());

  // shorthands
  const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
  const size_t small_word_size = chunk_manager->small_chunk_word_size();
  const size_t med_word_size = chunk_manager->medium_chunk_word_size();
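
  // Sketch of the two possible cases (relying on the size relations assumed
  // throughout this file: spec < small, small a multiple of spec): if top() is
  // already aligned to the small chunk size, small padding chunks are laid
  // down; otherwise top() is only specialized-chunk-aligned, and specialized
  // padding chunks are created until the next small-chunk boundary is reached.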

  while (top() < target_top) {

    // We could make this coding more generic, but right now we only deal with two possible chunk sizes
    // for padding chunks, so it is not worth it.
    size_t padding_chunk_word_size = small_word_size;
    if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
      assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
      padding_chunk_word_size = spec_word_size;
    }
    MetaWord* here = top();
    assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
    inc_top(padding_chunk_word_size);

    // Create new padding chunk.
    ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
    assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");

    Metachunk* const padding_chunk =
        ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
    assert(padding_chunk == (Metachunk*)here, "Sanity");
    DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
    log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
        PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
        (is_class() ? "class space " : "metaspace"),
        p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));

    // Mark chunk start in occupancy map.
    occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);

    // Chunks are born as in-use (see MetaChunk ctor). So, before returning
    // the padding chunk to its chunk manager, mark it as in use (ChunkManager
    // will assert that).
    do_update_in_use_info_for_chunk(padding_chunk, true);

    // Return Chunk to freelist.
    inc_container_count();
    chunk_manager->return_single_chunk(padding_chunk);
    // Please note: at this point, ChunkManager::return_single_chunk()
    // may already have merged the padding chunk with neighboring chunks, so
    // it may have vanished at this point. Do not reference the padding
    // chunk beyond this point.
  }

  assert(top() == target_top, "Sanity");

} // allocate_padding_chunks_until_top_is_at()

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Non-humongous chunks are to be allocated aligned to their chunk
  // size. So, start addresses of medium chunks are aligned to medium
  // chunk size, those of small chunks to small chunk size and so
  // forth. This facilitates merging of free chunks and reduces
  // fragmentation. Chunk sizes are spec < small < medium, with each
  // larger chunk size being a multiple of the next smaller chunk
  // size.
  // Because of this alignment, we may need to create a number of padding
  // chunks. These chunks are created and added to the freelist.

  // The chunk manager to which we will give our padding chunks.
  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());

  // shorthands
  const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
  const size_t small_word_size = chunk_manager->small_chunk_word_size();
  const size_t med_word_size = chunk_manager->medium_chunk_word_size();

  assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
      chunk_word_size >= med_word_size, "Invalid chunk size requested.");

  // Chunk alignment (in bytes) == chunk size unless humongous.
  // Humongous chunks are aligned to the smallest chunk size (spec).
  const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
      spec_word_size : chunk_word_size) * sizeof(MetaWord);
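
  // Worked example (a sketch, assuming non-class sizes of 128/512/8192 words
  // for specialized/small/medium chunks): if a medium chunk is requested while
  // top() sits 512 words past the last medium-chunk boundary, 8192 - 512 =
  // 7680 words worth of padding chunks must be created below before the
  // requested chunk itself can start at the next properly aligned address.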

  // Do we have enough space to create the requested chunk plus
  // any padding chunks needed?
  MetaWord* const next_aligned =
      static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
  if (!is_available((next_aligned - top()) + chunk_word_size)) {
    return NULL;
  }

  // Before allocating the requested chunk, allocate padding chunks if necessary.
  // We only need to do this for small or medium chunks: specialized chunks are the
  // smallest size, hence always aligned. Humongous chunks are allocated unaligned
  // (implicitly, also aligned to smallest chunk size).
  if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
    log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
        (is_class() ? "class space " : "metaspace"),
        top(), next_aligned);
    allocate_padding_chunks_until_top_is_at(next_aligned);
    // Now, top should be aligned correctly.
    assert_is_aligned(top(), required_chunk_alignment);
  }

  // Now, top should be aligned correctly.
  assert_is_aligned(top(), required_chunk_alignment);

  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
      "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    LogTarget(Debug, gc, metaspace, freelist) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(&ls);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
  assert(result == (Metachunk*)chunk_limit, "Sanity");
  occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
  do_update_in_use_info_for_chunk(result, true);

  inc_container_count();

  if (VerifyMetaspace) {
    DEBUG_ONLY(chunk_manager->locked_verify());
    DEBUG_ONLY(this->verify());
  }

  DEBUG_ONLY(do_verify_chunk(result));

  result->inc_use_count();

  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  if (result) {
    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " bytes.",
        (is_class() ? "class" : "non-class"), commit);
    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
  } else {
    log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " bytes.",
        (is_class() ? "class" : "non-class"), commit);
  }

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(MetaspaceExpand_lock);
  Metachunk* result = take_from_committed(chunk_word_size);
  return result;
}
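
// Note (a summary of the behavior above, not new behavior): get_chunk_vs() can
// return NULL even when free_words_in_vs() >= chunk_word_size, because
// take_from_committed() may first need to spend committed words on padding
// chunks to reach the required alignment. Callers such as retire() below
// handle this by falling back to the next smaller chunk size.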

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
      Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
        "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
        (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
        "Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
        "Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord);
  }

  // Initialize Occupancy Map.
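  // (Illustrative numbers only: with a 4 MB node and a 128-word smallest chunk
  // size - 1 KB on 64-bit - the map would track 4 MB / 1 KB = 4096 granules,
  // one per possible chunk start position.)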
  const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
  _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
  size_t used_words = used_words_in_vs();
  size_t commit_words = committed_words();
  size_t res_words = reserved_words();
  VirtualSpace* vs = virtual_space();

  st->print("node @" PTR_FORMAT ": ", p2i(this));
  st->print("reserved=");
  print_scaled_words(st, res_words, scale);
  st->print(", committed=");
  print_scaled_words_and_percentage(st, commit_words, res_words, scale);
  st->print(", used=");
  print_scaled_words_and_percentage(st, used_words, res_words, scale);
  st->cr();
  st->print("   [" PTR_FORMAT ", " PTR_FORMAT ", "
      PTR_FORMAT ", " PTR_FORMAT ")",
      p2i(bottom()), p2i(top()), p2i(end()),
      p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  DEBUG_ONLY(verify_container_count();)
  assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
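
  // Sketch of the carve-down order (illustrative numbers, assuming non-class
  // sizes of 8192/512/128 words for medium/small/specialized chunks and
  // ignoring any alignment padding get_chunk_vs() may add): with 9216 free
  // words left, one medium chunk (8192 words) and two small chunks (2 * 512
  // words) would be created and returned to the chunk manager, leaving
  // free_words_in_vs() == 0.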
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->size_by_index(index);

    while (free_words_in_vs() >= chunk_size) {
      Metachunk* chunk = get_chunk_vs(chunk_size);
      // Chunk will be allocated aligned, so allocation may require
      // additional padding chunks. That may cause above allocation to
      // fail. Just ignore the failed allocation and continue with the
      // next smaller chunk size. As the VirtualSpaceNode committed
      // size should be a multiple of the smallest chunk size, we
      // should always be able to fill the VirtualSpace completely.
      if (chunk == NULL) {
        break;
      }
      chunk_manager->return_single_chunk(chunk);
    }
    DEBUG_ONLY(verify_container_count();)
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

} // namespace internals
} // namespace metaspace