/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/occupancyMap.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}
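
// Note: the constructor below reserves address space but, except for the
// pre-committed case of special (large-page) reservations, does not commit
// memory yet. Committing happens later, in whole commit granules, via
// initialize() and expand_by().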

// bytes is the size of the associated virtual space.
VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
    _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
  assert_is_aligned(bytes, Metaspace::reserve_alignment());
  bool large_pages = should_commit_large_pages_when_reserving(bytes);
  _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  DEBUG_ONLY(this->verify();)
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    chunk->remove_sentinel();
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}
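
// print_map() below renders the chunk layout of this node as four text rows,
// one column per smallest (specialized) chunk area. A hypothetical excerpt:
//
//   0x00000007c0000000: .       .   . .
//                       MMMMMMMMssssXxXx
//                               4
//                                   v v
//
// Row 1 marks chunk start addresses, row 2 the chunk type (uppercase = in
// use), row 3 a non-normal chunk origin, row 4 virgin (never-used) chunks.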

void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {

  if (bottom() == top()) {
    return;
  }

  const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
  const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
  const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;

  int line_len = 100;
  const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
  line_len = (int)(section_len / spec_chunk_size);

  static const int NUM_LINES = 4;

  char* lines[NUM_LINES];
  for (int i = 0; i < NUM_LINES; i++) {
    lines[i] = (char*)os::malloc(line_len, mtInternal);
  }
  int pos = 0;
  const MetaWord* p = bottom();
  const Metachunk* chunk = (const Metachunk*)p;
  const MetaWord* chunk_end = p + chunk->word_size();
  while (p < top()) {
    if (pos == line_len) {
      pos = 0;
      for (int i = 0; i < NUM_LINES; i++) {
        st->fill_to(22);
        st->print_raw(lines[i], line_len);
        st->cr();
      }
    }
    if (pos == 0) {
      st->print(PTR_FORMAT ":", p2i(p));
    }
    if (p == chunk_end) {
      chunk = (Metachunk*)p;
      chunk_end = p + chunk->word_size();
    }
    // Line 1: chunk starting points (a dot if that area is a chunk start).
    lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';

    // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
    // chunk is in use.
    const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
    if (chunk->word_size() == spec_chunk_size) {
      lines[1][pos] = chunk_is_free ? 'x' : 'X';
    } else if (chunk->word_size() == small_chunk_size) {
      lines[1][pos] = chunk_is_free ? 's' : 'S';
    } else if (chunk->word_size() == med_chunk_size) {
      lines[1][pos] = chunk_is_free ? 'm' : 'M';
    } else if (chunk->word_size() > med_chunk_size) {
      lines[1][pos] = chunk_is_free ? 'h' : 'H';
    } else {
      ShouldNotReachHere();
    }

    // Line 3: chunk origin.
    const ChunkOrigin origin = chunk->get_origin();
    lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;

    // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct
    // of padding or splitting, but were never used.
    lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';

    p += spec_chunk_size;
    pos++;
  }
  if (pos > 0) {
    for (int i = 0; i < NUM_LINES; i++) {
      st->fill_to(22);
      // Only the first pos characters of the last, partially filled line
      // are valid; printing line_len bytes would emit uninitialized memory.
      st->print_raw(lines[i], pos);
      st->cr();
    }
  }
  for (int i = 0; i < NUM_LINES; i++) {
    os::free(lines[i]);
  }
}


#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    do_verify_chunk(chunk);
    // Don't count the chunks on the free lists. Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

#ifdef ASSERT
// Verify counters, all chunks in this list node and the occupancy map.
void VirtualSpaceNode::verify() {
  uintx num_in_use_chunks = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();

  // Iterate the chunks in this node and verify each chunk.
  while (chunk < invalid_chunk) {
    DEBUG_ONLY(do_verify_chunk(chunk);)
    if (!chunk->is_tagged_free()) {
      num_in_use_chunks++;
    }
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk = (Metachunk*) next;
  }
  assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
         ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
  // Also verify the occupancy map.
  occupancy_map()->verify(this->bottom(), this->top());
}
#endif // ASSERT
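
// Background for the check below: chunks are allocated at addresses aligned
// to their own size, and free neighboring chunks are supposed to be merged
// by the ChunkManager. Therefore, between two consecutive small- or
// medium-chunk boundaries there should be at most one free chunk; counting
// free chunks since the last boundary catches missed merge opportunities.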

#ifdef ASSERT
// Verify that all free chunks in this node are ideally merged
// (there should not be multiple small chunks where a large chunk could exist).
void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  // Shorthands.
  const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
  const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
  int num_free_chunks_since_last_med_boundary = -1;
  int num_free_chunks_since_last_small_boundary = -1;
  while (chunk < invalid_chunk) {
    // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
    // Reset the counter when encountering a non-free chunk.
    if (chunk->get_chunk_type() != HumongousIndex) {
      if (chunk->is_tagged_free()) {
        // Count successive free, non-humongous chunks.
        if (is_aligned(chunk, size_small)) {
          assert(num_free_chunks_since_last_small_boundary <= 1,
                 "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
          num_free_chunks_since_last_small_boundary = 0;
        } else if (num_free_chunks_since_last_small_boundary != -1) {
          num_free_chunks_since_last_small_boundary++;
        }
        if (is_aligned(chunk, size_med)) {
          assert(num_free_chunks_since_last_med_boundary <= 1,
                 "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
          num_free_chunks_since_last_med_boundary = 0;
        } else if (num_free_chunks_since_last_med_boundary != -1) {
          num_free_chunks_since_last_med_boundary++;
        }
      } else {
        // Encountering a non-free chunk, reset counters.
        num_free_chunks_since_last_med_boundary = -1;
        num_free_chunks_since_last_small_boundary = -1;
      }
    } else {
      // One cannot merge areas with a humongous chunk in the middle. Reset counters.
      num_free_chunks_since_last_med_boundary = -1;
      num_free_chunks_since_last_small_boundary = -1;
    }

    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk = (Metachunk*) next;
  }
}
#endif // ASSERT

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _container_count++;
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         "Inconsistency in container_count _container_count " UINTX_FORMAT
         " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
}
#endif

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
  if (_occupancy_map != NULL) {
    delete _occupancy_map;
  }
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace.
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}
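
// Worked example for the padding logic below (sizes purely illustrative):
// assume spec = 128 and small = 512 words, top() is only spec-aligned, and
// target_top lies two small chunk sizes ahead. The loop first creates
// spec-sized padding chunks until top() reaches a small chunk boundary, then
// continues with small-sized padding chunks until top() == target_top.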

// Given an address larger than top(), allocate padding chunks until top is at the given address.
void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {

  assert(target_top > top(), "Sanity");

  // Padding chunks are added to the freelist.
  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());

  // Shorthands.
  const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
  const size_t small_word_size = chunk_manager->small_chunk_word_size();
  const size_t med_word_size = chunk_manager->medium_chunk_word_size();

  while (top() < target_top) {

    // We could make this code more generic, but right now we only deal with two possible chunk sizes
    // for padding chunks, so it is not worth it.
    size_t padding_chunk_word_size = small_word_size;
    if (!is_aligned(top(), small_word_size * sizeof(MetaWord))) {
      assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
      padding_chunk_word_size = spec_word_size;
    }
    MetaWord* here = top();
    assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
    inc_top(padding_chunk_word_size);

    // Create new padding chunk.
    ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
    assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");

    Metachunk* const padding_chunk =
        ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
    assert(padding_chunk == (Metachunk*)here, "Sanity");
    DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
    log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
                                       PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
                                       (is_class() ? "class space " : "metaspace"),
                                       p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));

    // Mark chunk start in occupancy map.
    occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);

    // Chunks are born as in-use (see Metachunk ctor). So, before returning
    // the padding chunk to its chunk manager, mark it as in use (ChunkManager
    // will assert that).
    do_update_in_use_info_for_chunk(padding_chunk, true);

    // Return chunk to freelist.
    inc_container_count();
    chunk_manager->return_single_chunk(padding_chunk);
    // Please note: at this point, ChunkManager::return_single_chunk()
    // may already have merged the padding chunk with neighboring chunks,
    // so it may have vanished. Do not reference the padding chunk beyond
    // this point.
  }

  assert(top() == target_top, "Sanity");

} // allocate_padding_chunks_until_top_is_at()
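
// The allocation path below relies on the invariant that non-humongous
// chunks start at addresses aligned to their own size (and humongous chunks
// at specialized-chunk alignment). Example, with illustrative sizes: a
// medium chunk request arriving while top() sits at a merely small-aligned
// address first pads the gap up to the next medium boundary with small or
// specialized chunks, then places the medium chunk at that boundary.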

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging. Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Non-humongous chunks are to be allocated aligned to their chunk
  // size. So, start addresses of medium chunks are aligned to medium
  // chunk size, those of small chunks to small chunk size and so
  // forth. This facilitates merging of free chunks and reduces
  // fragmentation. Chunk sizes are spec < small < medium, with each
  // larger chunk size being a multiple of the next smaller chunk
  // size.
  // Because of this alignment, we may need to create a number of padding
  // chunks. These chunks are created and added to the freelist.

  // The chunk manager to which we will give our padding chunks.
  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());

  // Shorthands.
  const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
  const size_t small_word_size = chunk_manager->small_chunk_word_size();
  const size_t med_word_size = chunk_manager->medium_chunk_word_size();

  assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
         chunk_word_size >= med_word_size, "Invalid chunk size requested.");

  // Chunk alignment (in bytes) == chunk size unless humongous.
  // Humongous chunks are aligned to the smallest chunk size (spec).
  const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
                                           spec_word_size : chunk_word_size) * sizeof(MetaWord);

  // Do we have enough space to create the requested chunk plus
  // any padding chunks needed?
  MetaWord* const next_aligned =
      static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
  if (!is_available((next_aligned - top()) + chunk_word_size)) {
    return NULL;
  }

  // Before allocating the requested chunk, allocate padding chunks if necessary.
  // We only need to do this for small or medium chunks: specialized chunks are the
  // smallest size, hence always aligned. Humongous chunks are allocated unaligned
  // (implicitly, also aligned to smallest chunk size).
  if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top()) {
    log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
                                       (is_class() ? "class space " : "metaspace"),
                                       top(), next_aligned);
    allocate_padding_chunks_until_top_is_at(next_aligned);
    // Now, top should be aligned correctly.
    assert_is_aligned(top(), required_chunk_alignment);
  }

  // Now, top should be aligned correctly.
  assert_is_aligned(top(), required_chunk_alignment);

  // Bottom of the new chunk.
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    LogTarget(Debug, gc, metaspace, freelist) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full.
      print_on(&ls);
    }
    return NULL;
  }
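
  // At this point both checks have passed: top() is aligned for the
  // requested chunk type and at least chunk_word_size committed words
  // remain unused.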

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk.
  ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
  assert(result == (Metachunk*)chunk_limit, "Sanity");
  occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
  do_update_in_use_info_for_chunk(result, true);

  inc_container_count();

  if (VerifyMetaspace) {
    DEBUG_ONLY(chunk_manager->locked_verify());
    DEBUG_ONLY(this->verify());
  }

  DEBUG_ONLY(do_verify_chunk(result));

  result->inc_use_count();

  return result;
}


// Expand the virtual space (commit more of the reserved space).
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  if (result) {
    // Note: commit is a byte count, so log it as bytes, not words.
    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " bytes.",
                                       (is_class() ? "class" : "non-class"), commit);
    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
  } else {
    log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " bytes.",
                                       (is_class() ? "class" : "non-class"), commit);
  }

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(MetaspaceExpand_lock);
  Metachunk* result = take_from_committed(chunk_word_size);
  return result;
}
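
// Typical call sequence (a sketch; the actual caller is the owning
// VirtualSpaceList): construct the node, call initialize() once to set up
// the underlying VirtualSpace and the occupancy map, then expand_by() and
// get_chunk_vs() as chunks are needed, and retire() when the node is full.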

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
  }

  // Initialize the occupancy map.
  const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
  _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
  size_t used_words = used_words_in_vs();
  size_t commit_words = committed_words();
  size_t res_words = reserved_words();
  VirtualSpace* vs = virtual_space();

  st->print("node @" PTR_FORMAT ": ", p2i(this));
  st->print("reserved=");
  print_scaled_words(st, res_words, scale);
  st->print(", committed=");
  print_scaled_words_and_percentage(st, commit_words, res_words, scale);
  st->print(", used=");
  print_scaled_words_and_percentage(st, used_words, res_words, scale);
  st->cr();
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", "
            PTR_FORMAT ", " PTR_FORMAT ")",
            p2i(bottom()), p2i(top()), p2i(end()),
            p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  DEBUG_ONLY(verify_container_count();)
  assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->size_by_index(index);

    while (free_words_in_vs() >= chunk_size) {
      Metachunk* chunk = get_chunk_vs(chunk_size);
      // Chunks will be allocated aligned, so the allocation may require
      // additional padding chunks. That may cause the allocation above to
      // fail. Just ignore the failed allocation and continue with the
      // next smaller chunk size. As the VirtualSpaceNode committed
      // size should be a multiple of the smallest chunk size, we
      // should always be able to fill the VirtualSpace completely.
      if (chunk == NULL) {
        break;
      }
      chunk_manager->return_single_chunk(chunk);
    }
    DEBUG_ONLY(verify_container_count();)
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

} // namespace metaspace