
src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp

rev 57511 : [mq]: metaspace-improvement


   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 

  25 #include "precompiled.hpp"
  26 
  27 #include "logging/log.hpp"
  28 #include "logging/logStream.hpp"
  29 #include "memory/metaspace/metachunk.hpp"
  30 #include "memory/metaspace.hpp"
  31 #include "memory/metaspace/chunkManager.hpp"
  32 #include "memory/metaspace/metaDebug.hpp"
  33 #include "memory/metaspace/metaspaceCommon.hpp"
  34 #include "memory/metaspace/occupancyMap.hpp"
  35 #include "memory/metaspace/virtualSpaceNode.hpp"
  36 #include "memory/virtualspace.hpp"

  37 #include "runtime/os.hpp"
  38 #include "services/memTracker.hpp"
  39 #include "utilities/copy.hpp"
  40 #include "utilities/debug.hpp"
  41 #include "utilities/globalDefinitions.hpp"

  42 
  43 namespace metaspace {
  44 
  45 // Decide if large pages should be committed when the memory is reserved.
  46 static bool should_commit_large_pages_when_reserving(size_t bytes) {
  47   if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
  48     size_t words = bytes / BytesPerWord;
  49     bool is_class = false; // We never reserve large pages for the class space.
  50     if (MetaspaceGC::can_expand(words, is_class) &&
  51         MetaspaceGC::allowed_expansion() >= words) {
  52       return true;
  53     }
  54   }
  55 
  56   return false;
  57 }
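The reserve-time large-page decision above boils down to a conjunction of flags plus the GC expansion check. A minimal standalone model of that predicate, not part of this file; the parameter names are illustrative stand-ins for the real VM flags and queries:

    // Pre-commit large pages at reservation time only if the OS cannot commit
    // large pages piecemeal later, and only if the GC would permit the expansion.
    static bool commit_large_pages_up_front(bool use_large_pages,
                                            bool use_large_pages_in_metaspace,
                                            bool os_can_commit_large_page_memory,
                                            bool gc_allows_expansion) {
      return use_large_pages && use_large_pages_in_metaspace &&
             !os_can_commit_large_page_memory && gc_allows_expansion;
    }
    // Example: large pages requested, OS cannot commit them later, GC allows the
    // expansion -> commit_large_pages_up_front(true, true, false, true) == true.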

  58 
  59 // bytes is the size of the associated virtual space.
  60 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
  61     _next(NULL), _is_class(is_class), _rs(), _top(NULL), _container_count(0), _occupancy_map(NULL) {
  62   assert_is_aligned(bytes, Metaspace::reserve_alignment());
  63   bool large_pages = should_commit_large_pages_when_reserving(bytes);
  64   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  65 
  66   if (_rs.is_reserved()) {
  67     assert(_rs.base() != NULL, "Catch if we get a NULL address");
  68     assert(_rs.size() != 0, "Catch if we get a 0 size");
  69     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
  70     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());


  71 
  72     MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  73   }
  74 }
  75 
  76 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  77   // When a node is purged, let's give it a thorough examination.
  78   DEBUG_ONLY(verify(true);)
  79   Metachunk* chunk = first_chunk();
  80   Metachunk* invalid_chunk = (Metachunk*) top();
  81   while (chunk < invalid_chunk ) {
  82     assert(chunk->is_tagged_free(), "Should be tagged free");
  83     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
  84     chunk_manager->remove_chunk(chunk);
  85     chunk->remove_sentinel();
  86     assert(chunk->next() == NULL &&
  87         chunk->prev() == NULL,
  88         "Was not removed from its list");
  89     chunk = (Metachunk*) next;
  90   }
  91 }
  92 
  93 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {


  94 
  95   if (bottom() == top()) {
  96     return;
  97   }
  98 
  99   const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
 100   const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
 101   const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
 102 
 103   int line_len = 100;
 104   const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
 105   line_len = (int)(section_len / spec_chunk_size);
 106 
 107   static const int NUM_LINES = 4;
 108 
 109   char* lines[NUM_LINES];
 110   for (int i = 0; i < NUM_LINES; i ++) {
 111     lines[i] = (char*)os::malloc(line_len, mtInternal);
 112   }
 113   int pos = 0;
 114   const MetaWord* p = bottom();
 115   const Metachunk* chunk = (const Metachunk*)p;
 116   const MetaWord* chunk_end = p + chunk->word_size();
 117   while (p < top()) {
 118     if (pos == line_len) {
 119       pos = 0;
 120       for (int i = 0; i < NUM_LINES; i ++) {
 121         st->fill_to(22);
 122         st->print_raw(lines[i], line_len);
 123         st->cr();
 124       }
 125     }
 126     if (pos == 0) {
 127       st->print(PTR_FORMAT ":", p2i(p));
 128     }
 129     if (p == chunk_end) {
 130       chunk = (Metachunk*)p;
 131       chunk_end = p + chunk->word_size();
 132     }
 133     // Line 1: chunk starting points (a dot if that area is a chunk start).
 134     lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
 135 
 136     // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
 137     // chunk is in use.
 138     const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
 139     if (chunk->word_size() == spec_chunk_size) {
 140       lines[1][pos] = chunk_is_free ? 'x' : 'X';
 141     } else if (chunk->word_size() == small_chunk_size) {
 142       lines[1][pos] = chunk_is_free ? 's' : 'S';
 143     } else if (chunk->word_size() == med_chunk_size) {
 144       lines[1][pos] = chunk_is_free ? 'm' : 'M';
 145     } else if (chunk->word_size() > med_chunk_size) {
 146       lines[1][pos] = chunk_is_free ? 'h' : 'H';
 147     } else {
 148       ShouldNotReachHere();
 149     }
 150 
 151     // Line 3: chunk origin
 152     const ChunkOrigin origin = chunk->get_origin();
 153     lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
 154 
 155     // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
 156     //         but were never used.
 157     lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
 158 
 159     p += spec_chunk_size;
 160     pos ++;
 161   }
 162   if (pos > 0) {
 163     for (int i = 0; i < NUM_LINES; i ++) {
 164       st->fill_to(22);
 165       st->print_raw(lines[i], line_len);
 166       st->cr();
 167     }
 168   }
 169   for (int i = 0; i < NUM_LINES; i ++) {
 170     os::free(lines[i]);
 171   }
 172 }
 173 


 174 
 175 #ifdef ASSERT

 176 
 177 // Verify counters, all chunks in this list node and the occupancy map.
 178 void VirtualSpaceNode::verify(bool slow) {
 179   log_trace(gc, metaspace, freelist)("verifying %s virtual space node (%s).",
 180     (is_class() ? "class space" : "metaspace"), (slow ? "slow" : "quick"));
 181   // Fast mode: just verify chunk counters and basic geometry
 182   // Slow mode: verify chunks and occupancy map
 183   uintx num_in_use_chunks = 0;
 184   Metachunk* chunk = first_chunk();
 185   Metachunk* invalid_chunk = (Metachunk*) top();
 186 
 187   // Iterate the chunks in this node and verify each chunk.
 188   while (chunk < invalid_chunk ) {
 189     if (slow) {
 190       do_verify_chunk(chunk);
 191     }
 192     if (!chunk->is_tagged_free()) {
 193       num_in_use_chunks ++;
 194     }
 195     const size_t s = chunk->word_size();
 196     // Prevent endless loop on invalid chunk size.
 197     assert(is_valid_chunksize(is_class(), s), "Invalid chunk size: " SIZE_FORMAT ".", s);
 198     MetaWord* next = ((MetaWord*)chunk) + s;
 199     chunk = (Metachunk*) next;
 200   }
 201   assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
 202       ", counter: " UINTX_FORMAT ".", num_in_use_chunks, _container_count);
 203   // Also verify the occupancy map.
 204   if (slow) {
 205     occupancy_map()->verify(bottom(), top());
 206   }
 207 }
 208 
 209 // Verify that all free chunks in this node are ideally merged
 210 // (there should not be multiple small chunks where a large chunk could exist.)
 211 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
 212   Metachunk* chunk = first_chunk();
 213   Metachunk* invalid_chunk = (Metachunk*) top();
 214   // Shorthands.
 215   const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
 216   const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
 217   int num_free_chunks_since_last_med_boundary = -1;
 218   int num_free_chunks_since_last_small_boundary = -1;
 219   bool error = false;
 220   char err[256];
 221   while (!error && chunk < invalid_chunk ) {
 222     // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
 223     // Reset the counter when encountering a non-free chunk.
 224     if (chunk->get_chunk_type() != HumongousIndex) {
 225       if (chunk->is_tagged_free()) {
 226         // Count successive free, non-humongous chunks.
 227         if (is_aligned(chunk, size_small)) {
 228           if (num_free_chunks_since_last_small_boundary > 0) {
 229             error = true;
 230             jio_snprintf(err, sizeof(err), "Missed chunk merge opportunity to merge a small chunk preceding " PTR_FORMAT ".", p2i(chunk));
 231           } else {
 232             num_free_chunks_since_last_small_boundary = 0;
 233           }
 234         } else if (num_free_chunks_since_last_small_boundary != -1) {
 235           num_free_chunks_since_last_small_boundary ++;
 236         }
 237         if (is_aligned(chunk, size_med)) {
 238           if (num_free_chunks_since_last_med_boundary > 0) {
 239             error = true;
 240             jio_snprintf(err, sizeof(err), "Missed chunk merge opportunity to merge a medium chunk preceding " PTR_FORMAT ".", p2i(chunk));
 241           } else {
 242             num_free_chunks_since_last_med_boundary = 0;
 243           }
 244         } else if (num_free_chunks_since_last_med_boundary != -1) {
 245           num_free_chunks_since_last_med_boundary ++;
 246         }
 247       } else {
 248         // Encountering a non-free chunk, reset counters.
 249         num_free_chunks_since_last_med_boundary = -1;
 250         num_free_chunks_since_last_small_boundary = -1;
 251       }
 252     } else {
 253       // One cannot merge areas with a humongous chunk in the middle. Reset counters.
 254       num_free_chunks_since_last_med_boundary = -1;
 255       num_free_chunks_since_last_small_boundary = -1;
 256     }

 257 
 258     if (error) {
 259       print_map(tty, is_class());
 260       fatal("%s", err);
 261     }
 262 
 263     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
 264     chunk = (Metachunk*) next;
 265   }
 266 }
 267 #endif // ASSERT
 268 
 269 void VirtualSpaceNode::inc_container_count() {
 270   assert_lock_strong(MetaspaceExpand_lock);
 271   _container_count++;
 272 }
 273 
 274 void VirtualSpaceNode::dec_container_count() {
 275   assert_lock_strong(MetaspaceExpand_lock);
 276   _container_count--;
 277 }
 278 
 279 VirtualSpaceNode::~VirtualSpaceNode() {
 280   _rs.release();
 281   if (_occupancy_map != NULL) {
 282     delete _occupancy_map;
 283   }
 284 #ifdef ASSERT
 285   size_t word_size = sizeof(*this) / BytesPerWord;
 286   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
 287 #endif
 288 }
 289 
 290 size_t VirtualSpaceNode::used_words_in_vs() const {
 291   return pointer_delta(top(), bottom(), sizeof(MetaWord));
 292 }
 293 
 294 // Space committed in the VirtualSpace
 295 size_t VirtualSpaceNode::capacity_words_in_vs() const {
 296   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 297 }
 298 
 299 size_t VirtualSpaceNode::free_words_in_vs() const {
 300   return pointer_delta(end(), top(), sizeof(MetaWord));
 301 }

 302 
 303 // Given an address larger than top(), allocate padding chunks until top is at the given address.
 304 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {

 305 
 306   assert(target_top > top(), "Sanity");


 307 
 308   // Padding chunks are added to the freelist.
 309   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(is_class());

 310 
 311   // shorthands
 312   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
 313   const size_t small_word_size = chunk_manager->small_chunk_word_size();
 314   const size_t med_word_size = chunk_manager->medium_chunk_word_size();

 315 
 316   while (top() < target_top) {

 317 
 318     // We could make this code more generic, but right now we only deal with two possible chunk sizes
 319     // for padding chunks, so it is not worth it.
 320     size_t padding_chunk_word_size = small_word_size;
 321     if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
 322       assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
 323       padding_chunk_word_size = spec_word_size;
 324     }
 325     MetaWord* here = top();
 326     assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
 327     inc_top(padding_chunk_word_size);
 328 
 329     // Create new padding chunk.
 330     ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
 331     assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
 332 
 333     Metachunk* const padding_chunk =
 334         ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
 335     assert(padding_chunk == (Metachunk*)here, "Sanity");
 336     DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
 337     log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
 338         PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
 339         (is_class() ? "class space " : "metaspace"),
 340         p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
 341 
 342     // Mark chunk start in occupancy map.
 343     occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
 344 
 345     // Chunks are born as in-use (see MetaChunk ctor). So, before returning
 346     // the padding chunk to its chunk manager, mark it as in use (ChunkManager
 347     // will assert that).
 348     do_update_in_use_info_for_chunk(padding_chunk, true);
 349 
 350     // Return Chunk to freelist.
 351     inc_container_count();
 352     chunk_manager->return_single_chunk(padding_chunk);
 353     // Please note: at this point, ChunkManager::return_single_chunk()
 354     // may already have merged the padding chunk with neighboring chunks, so
 355     // it may have vanished at this point. Do not reference the padding
 356     // chunk beyond this point.
 357   }
 358 
 359   assert(top() == target_top, "Sanity");

 360 
 361 } // allocate_padding_chunks_until_top_is_at()

 362 
 363 // Allocates the chunk from the virtual space only.
 364 // This interface is also used internally for debugging.  Not all
 365 // chunks removed here are necessarily used for allocation.
 366 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
 367   // Non-humongous chunks are to be allocated aligned to their chunk
 368   // size. So, start addresses of medium chunks are aligned to medium
 369   // chunk size, those of small chunks to small chunk size and so
 370   // forth. This facilitates merging of free chunks and reduces
 371   // fragmentation. Chunk sizes are spec < small < medium, with each
 372   // larger chunk size being a multiple of the next smaller chunk
 373   // size.
 374   // Because of this alignment, we may need to create a number of padding
 375   // chunks. These chunks are created and added to the freelist.
 376 
 377   // The chunk manager to which we will give our padding chunks.
 378   ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(is_class());
 379 
 380   // shorthands
 381   const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
 382   const size_t small_word_size = chunk_manager->small_chunk_word_size();
 383   const size_t med_word_size = chunk_manager->medium_chunk_word_size();
 384 
 385   assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
 386       chunk_word_size >= med_word_size, "Invalid chunk size requested.");
 387 
 388   // Chunk alignment (in bytes) == chunk size unless humongous.
 389   // Humongous chunks are aligned to the smallest chunk size (spec).
 390   const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
 391       spec_word_size : chunk_word_size) * sizeof(MetaWord);
 392 
 393   // Do we have enough space to create the requested chunk plus
 394   // any padding chunks needed?
 395   MetaWord* const next_aligned =
 396       static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
 397   if (!is_available((next_aligned - top()) + chunk_word_size)) {
 398     return NULL;
 399   }

 400 
 401   // Before allocating the requested chunk, allocate padding chunks if necessary.
 402   // We only need to do this for small or medium chunks: specialized chunks are the
 403   // smallest size, hence always aligned. Humongous chunks are allocated unaligned
 404   // (implicitly, also aligned to smallest chunk size).
 405   if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top())  {
 406     log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
 407         (is_class() ? "class space " : "metaspace"),
 408         top(), next_aligned);
 409     allocate_padding_chunks_until_top_is_at(next_aligned);
 410     // Now, top should be aligned correctly.
 411     assert_is_aligned(top(), required_chunk_alignment);
 412   }
 413 
 414   // Now, top should be aligned correctly.
 415   assert_is_aligned(top(), required_chunk_alignment);
 416 
 417   // Bottom of the new chunk
 418   MetaWord* chunk_limit = top();
 419   assert(chunk_limit != NULL, "Not safe to call this method");
 420 
 421   // The virtual spaces are always expanded by the
 422   // commit granularity to enforce the following condition.
 423   // Without this the is_available check will not work correctly.
 424   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
 425       "The committed memory doesn't match the expanded memory.");
 426 
 427   if (!is_available(chunk_word_size)) {
 428     LogTarget(Trace, gc, metaspace, freelist) lt;
 429     if (lt.is_enabled()) {
 430       LogStream ls(lt);
 431       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
 432       // Dump some information about the virtual space that is nearly full
 433       print_on(&ls);
 434     }
 435     return NULL;
 436   }
 437 
 438   // Take the space  (bump top on the current virtual space).
 439   inc_top(chunk_word_size);
 440 
 441   // Initialize the chunk
 442   ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
 443   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
 444   assert(result == (Metachunk*)chunk_limit, "Sanity");
 445   occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
 446   do_update_in_use_info_for_chunk(result, true);



 447 
 448   inc_container_count();

 449 
 450 #ifdef ASSERT
 451   EVERY_NTH(VerifyMetaspaceInterval)
 452     chunk_manager->locked_verify(true);
 453     verify(true);
 454   END_EVERY_NTH
 455   do_verify_chunk(result);
 456 #endif
 457 
 458   result->inc_use_count();


 459 
 460   return result;
 461 }
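The alignment rule used by take_from_committed() above (humongous requests fall back to the smallest alignment, everything else aligns to its own size) can be modeled in isolation. A minimal sketch, not part of this file, using hypothetical word sizes purely for illustration:

    #include <cassert>
    #include <cstddef>

    // Mirrors the required_chunk_alignment computation, expressed in words:
    // chunks larger than a medium chunk ("humongous") only need spec alignment.
    static size_t required_alignment_words(size_t req_words, size_t spec_words, size_t med_words) {
      return (req_words > med_words) ? spec_words : req_words;
    }

    int main() {
      // Hypothetical geometry: spec = 128, small = 512, medium = 8192 words.
      assert(required_alignment_words(128,   128, 8192) == 128);   // spec chunk
      assert(required_alignment_words(512,   128, 8192) == 512);   // small chunk
      assert(required_alignment_words(8192,  128, 8192) == 8192);  // medium chunk
      assert(required_alignment_words(20000, 128, 8192) == 128);   // humongous chunk
      return 0;
    }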
 462 
 463 
 464 // Expand the virtual space (commit more of the reserved space)
 465 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
 466   size_t min_bytes = min_words * BytesPerWord;
 467   size_t preferred_bytes = preferred_words * BytesPerWord;
 468 
 469   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
 470 
 471   if (uncommitted < min_bytes) {
 472     return false;
 473   }
 474 
 475   size_t commit = MIN2(preferred_bytes, uncommitted);
 476   bool result = virtual_space()->expand_by(commit, false);



 477 
 478   if (result) {
 479     log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
 480         (is_class() ? "class" : "non-class"), commit);
 481     DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
 482   } else {
 483     log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
 484         (is_class() ? "class" : "non-class"), commit);
 485   }
 486 
 487   assert(result, "Failed to commit memory");
 488 
 489   return result;
 490 }
 491 
 492 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {

 493   assert_lock_strong(MetaspaceExpand_lock);
 494   Metachunk* result = take_from_committed(chunk_word_size);
 495   return result;
 496 }
 497 
 498 bool VirtualSpaceNode::initialize() {


 499 
 500   if (!_rs.is_reserved()) {
 501     return false;
 502   }
 503 
 504   // These are necessary restrictions to make sure that the virtual space always
 505   // grows in steps of Metaspace::commit_alignment(). If both base and size are
 506   // aligned only the middle alignment of the VirtualSpace is used.
 507   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
 508   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
 509 
 510   // ReservedSpaces marked as special will have the entire memory
 511   // pre-committed. Setting a committed size will make sure that
 512   // committed_size and actual_committed_size agree.
 513   size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
 514 
 515   bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
 516       Metaspace::commit_alignment());
 517   if (result) {
 518     assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
 519         "Checking that the pre-committed memory was registered by the VirtualSpace");
 520 
 521     set_top((MetaWord*)virtual_space()->low());
 522   }
 523 
 524   // Initialize Occupancy Map.
 525   const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
 526   _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
 527 
 528   return result;
 529 }
 530 
 531 void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
 532   size_t used_words = used_words_in_vs();
 533   size_t commit_words = committed_words();
 534   size_t res_words = reserved_words();
 535   VirtualSpace* vs = virtual_space();
 536 
 537   st->print("node @" PTR_FORMAT ": ", p2i(this));
 538   st->print("reserved=");
 539   print_scaled_words(st, res_words, scale);
 540   st->print(", committed=");
 541   print_scaled_words_and_percentage(st, commit_words, res_words, scale);
 542   st->print(", used=");
 543   print_scaled_words_and_percentage(st, used_words, res_words, scale);

 544   st->cr();
 545   st->print("   [" PTR_FORMAT ", " PTR_FORMAT ", "
 546       PTR_FORMAT ", " PTR_FORMAT ")",
 547       p2i(bottom()), p2i(top()), p2i(end()),
 548       p2i(vs->high_boundary()));
 549 }
 550 

 551 #ifdef ASSERT
 552 void VirtualSpaceNode::mangle() {
 553   size_t word_size = capacity_words_in_vs();
 554   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
 555 }
 556 #endif // ASSERT
 557 
 558 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
 559   assert(is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
 560 #ifdef ASSERT
 561   verify(false);
 562   EVERY_NTH(VerifyMetaspaceInterval)
 563     verify(true);
 564   END_EVERY_NTH
 565 #endif
 566   for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
 567     ChunkIndex index = (ChunkIndex)i;
 568     size_t chunk_size = chunk_manager->size_by_index(index);
 569 
 570     while (free_words_in_vs() >= chunk_size) {
 571       Metachunk* chunk = get_chunk_vs(chunk_size);
 572       // Chunk will be allocated aligned, so allocation may require
 573       // additional padding chunks. That may cause above allocation to
 574       // additional padding chunks. That may cause the above allocation to
 575       // fail. Just ignore the failed allocation and continue with the
 576       // next smaller chunk size. As the VirtualSpaceNode committed
 577       // should always be able to fill the VirtualSpace completely.
 578       if (chunk == NULL) {
 579         break;
 580       }
 581       chunk_manager->return_single_chunk(chunk);
 582     }
 583   }
 584   assert(free_words_in_vs() == 0, "should be empty now");
 585 }


 586 
 587 } // namespace metaspace
 588 


   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "logging/log.hpp"
  29 
  30 #include "memory/metaspace/chunkLevel.hpp"
  31 #include "memory/metaspace/chunkTree.hpp"
  32 #include "memory/metaspace/commitLimiter.hpp"
  33 #include "memory/metaspace/constants.hpp"
  34 #include "memory/metaspace/counter.hpp"
  35 #include "memory/metaspace/metachunk.hpp"



  36 #include "memory/metaspace/metaspaceCommon.hpp"
  37 #include "memory/metaspace/runningCounters.hpp"
  38 #include "memory/metaspace/virtualSpaceNode.hpp"
  39 
  40 #include "runtime/mutexLocker.hpp"
  41 #include "runtime/os.hpp"
  42 
  43 #include "utilities/align.hpp"
  44 #include "utilities/debug.hpp"
  45 #include "utilities/globalDefinitions.hpp"
  46 #include "utilities/ostream.hpp"
  47 
  48 namespace metaspace {
  49 
  50 #ifdef ASSERT
  51 template <class T>
  52 void check_is_aligned_to_commit_granule(T x) {
  53   assert(is_aligned(x, constants::commit_granule_bytes), "Unaligned pointer");
  54 }
  55 #endif
  56 
  57 // Given an address range, ensure it is committed.
  58 //
  59 // The range has to be aligned to granule size.
  60 //
  61 // Function will:
  62 // - check how many granules in that region are uncommitted; If all are committed, it
  63 //    returns true immediately.
  64 // - check if committing those uncommitted granules would bring us over the commit limit
  65 //    (GC threshold, MaxMetaspaceSize). If so, it returns false.
  66 // - commit the memory.
  67 // - mark the range as committed in the commit mask
  68 //
  69 // Returns true on success, false if it hit a commit limit.
  70 bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {
  71 
  72   DEBUG_ONLY(check_is_aligned_to_commit_granule(p);)
  73   DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);)
  74   assert_lock_strong(MetaspaceExpand_lock);
  75 
  76   // First calculate how large the committed regions in this range are
  77   const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  78   DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);)
  79 
  80   // By how much words we would increase commit charge
  81   //  were we to commit the given address range completely.
  82   const size_t commit_increase_words = word_size - committed_words_in_range;
  83 
  84   if (commit_increase_words == 0) {
  85     return true; // Already fully committed, nothing to do.
  86   }
  87 
  88   // Before committing any more memory, check limits.
  89   if (_commit_limiter->possible_expansion_words() < commit_increase_words) {
  90     return false;
  91   }


  92 
  93   // Commit...
  94   if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) {
  95     vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace.");
  96   }
  97 
  98   log_debug(gc, metaspace)("Increased metaspace by " SIZE_FORMAT " bytes.",
  99                            commit_increase_words * BytesPerWord);
 100 
 101   // ... tell commit limiter...
 102   _commit_limiter->increase_committed(commit_increase_words);
 103 
 104   // ... update counters in containing vslist ...
 105   _total_committed_words_counter->increment_by(commit_increase_words);
 106 
 107   // ... and update the commit mask.
 108   _commit_mask.mark_range_as_committed(p, word_size);
 109 
 110 #ifdef ASSERT
 111   // The commit boundary maintained in the CommitLimiter should equal the sum of committed words
 112   // in both class and non-class vslist (outside gtests).
 113   if (_commit_limiter == CommitLimiter::globalLimiter()) {
 114     assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
 115   }
 116 #endif
 117 
 118   return true;



 119 



 120 }
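The commit-charge arithmetic in commit_range() above (only the not-yet-committed part of the range counts against the limit) can be sketched with plain integers. This sketch is not part of the patch; the function and parameter names are hypothetical and only mirror the bookkeeping, not the real CommitLimiter API:

    #include <cstddef>

    // Returns true if the range could be "committed" without exceeding the limit.
    // committed_total is the running commit charge; assumes *committed_total <= limit_words.
    static bool try_commit(size_t range_words, size_t already_committed_words,
                           size_t limit_words, size_t* committed_total) {
      const size_t increase = range_words - already_committed_words;
      if (increase == 0) {
        return true;                          // range already fully committed
      }
      if (limit_words - *committed_total < increase) {
        return false;                         // would hit the commit limit (GC threshold, MaxMetaspaceSize)
      }
      *committed_total += increase;           // in the real code, os::commit_memory() happens before this
      return true;
    }

    int main() {
      size_t committed = 0;
      bool ok1 = try_commit(/*range*/ 64, /*already*/ 0,  /*limit*/ 100, &committed); // succeeds, committed == 64
      bool ok2 = try_commit(/*range*/ 64, /*already*/ 16, /*limit*/ 100, &committed); // needs 48, only 36 left -> fails
      return (ok1 && !ok2 && committed == 64) ? 0 : 1;
    }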

 121 
 122 // Given an address range, ensure it is committed.
 123 //
 124 // The range does not have to be aligned to granule size. However, the function will always commit
 125 // whole granules.
 126 //
 127 // Function will:
 128 // - check how many granules in that region are uncommitted; If all are committed, it
 129 //    returns true immediately.
 130 // - check if committing those uncommitted granules would bring us over the commit limit
 131 //    (GC threshold, MaxMetaspaceSize). If so, it returns false.
 132 // - commit the memory.
 133 // - mark the range as committed in the commit mask
 134 //
 135 // Returns true on success, false if it hit a commit limit.
 136 bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) {
 137 

 138   assert_lock_strong(MetaspaceExpand_lock);
 139   assert(p != NULL && word_size > 0, "Sanity");

 140 
 141   MetaWord* p_start = align_down(p, constants::commit_granule_bytes);
 142   MetaWord* p_end = align_up(p + word_size, constants::commit_granule_bytes);
 143 
 144   // Todo: simple for now. Make it more intelligent later.
 145   return commit_range(p_start, p_end - p_start);

 146 



 147 }
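Widening an arbitrary sub-range to whole commit granules, as ensure_range_is_committed() does with align_down/align_up above, can be shown with a small self-contained helper. This is an illustration only, not part of the patch; the granule size below is a made-up power of two, not the real constant:

    #include <cassert>
    #include <cstddef>

    // Widens [p, p + len) to whole granules; positions are word indices and
    // granule_words must be a power of two.
    static void whole_granule_range(size_t p, size_t len, size_t granule_words,
                                    size_t* start, size_t* size) {
      *start = p & ~(granule_words - 1);                                        // align_down(p)
      const size_t end = (p + len + granule_words - 1) & ~(granule_words - 1);  // align_up(p + len)
      *size = end - *start;                                                     // always a granule multiple
    }

    int main() {
      size_t start, size;
      whole_granule_range(/*p=*/ 70000, /*len=*/ 10, /*granule_words=*/ 65536, &start, &size);
      assert(start == 65536 && size == 65536);  // the small range lands inside a single granule
      return 0;
    }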
 148 
 149 // Given an address range (which has to be aligned to commit granule size):
 150 //  - uncommit it
 151 //  - mark it as uncommitted in the commit mask
 152 bool VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {
 153 
 154   DEBUG_ONLY(check_is_aligned_to_commit_granule(p);)
 155   DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);)
 156   assert_lock_strong(MetaspaceExpand_lock);
 157 
 158   // First calculate how large the committed regions in this range are
 159   const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
 160   DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);)
 161 
 162   if (committed_words_in_range == 0) {
 163     return true; // Already fully uncommitted, nothing to do.
 164   }
 165 
 166   // Uncommit...
 167   if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
 168     // Note: this can actually happen, since uncommit may increase the number of mappings.
 169     fatal("Failed to uncommit metaspace.");
 170   }
 171 
 172   log_debug(gc, metaspace)("Decreased metaspace by " SIZE_FORMAT " bytes.",
 173                             committed_words_in_range * BytesPerWord);
 174 
 175   // ... tell commit limiter...
 176   _commit_limiter->decrease_committed(committed_words_in_range);
 177 
 178   // ... and global counters...
 179   _total_committed_words_counter->decrement_by(committed_words_in_range);
 180 
 181    // ... and update the commit mask.
 182   _commit_mask.mark_range_as_uncommitted(p, word_size);
 183 
 184 #ifdef ASSERT
 185   // The commit boundary maintained in the CommitLimiter should equal the sum of committed words
 186   // in both class and non-class vslist (outside gtests).
 187   if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario
 188     assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
 189   }
 190 #endif
 191 
 192   return true;
 193 
 194 }

 195 
 196 //// creation, destruction ////
 197 
 198 VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs,
 199                                    CommitLimiter* limiter,
 200                                    SizeCounter* reserve_counter,
 201                                    SizeCounter* commit_counter)
 202   : _next(NULL),
 203     _base(rs.base()),
 204     _word_size(rs.size() / BytesPerWord),
 205     _used_words(0),
 206     _commit_mask(rs.base(), rs.size() / BytesPerWord),
 207     _chunk_tree_array(rs.base(), rs.size() / BytesPerWord),
 208     _commit_limiter(limiter),
 209     _total_reserved_words_counter(reserve_counter),
 210     _total_committed_words_counter(commit_counter)
 211 {
 212   // Update reserved counter in vslist
 213   _total_reserved_words_counter->increment_by(_word_size);
 214 }
 215 
 216 // Create a node of a given size
 217 VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
 218                                                 CommitLimiter* limiter,
 219                                                 SizeCounter* reserve_counter,
 220                                                 SizeCounter* commit_counter)
 221 {
 222 
 223   DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);)
 224 
 225   ReservedSpace rs(word_size * BytesPerWord,
 226                    constants::commit_granule_bytes,
 227                    false, // TODO deal with large pages
 228                    false);
 229 
 230   if (!rs.is_reserved()) {
 231     vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
 232   }
 233 
 234   reserve_counter->increment_by(word_size * BytesPerWord);
 235 
 236   return create_node(rs, limiter, reserve_counter, commit_counter);
 237 
 238 }

 239 
 240 // Create a node over an existing space
 241 VirtualSpaceNode* VirtualSpaceNode::create_node(ReservedSpace rs,
 242                                                 CommitLimiter* limiter,
 243                                                 SizeCounter* reserve_counter,
 244                                                 SizeCounter* commit_counter)
 245 {
 246   reserve_counter->increment_by(rs.size() * BytesPerWord);
 247   return new VirtualSpaceNode(rs, limiter, reserve_counter, commit_counter);
 248 }
 249 
 250 VirtualSpaceNode::~VirtualSpaceNode() {
 251   _rs.release();
 252 
 253 
 254   // Update counters in vslist
 255   _total_committed_words_counter->decrement_by(committed_words());
 256   _total_reserved_words_counter->decrement_by(_word_size);
 257 

 258 }
 259 
 260 




 261 
 262 //// Chunk allocation, splitting, merging /////
 263 
 264 // Allocate a root chunk from this node. Will fail and return NULL
 265 // if the node is full.
 266 // Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
 267 // Hence, before using this chunk, it must be committed.
 268 // Also, no limits are checked, since no committing takes place.
 269 Metachunk* VirtualSpaceNode::allocate_root_chunk() {
 270 
 271   assert_lock_strong(MetaspaceExpand_lock);
 272 
 273   assert_is_aligned(free_words(), chklvl::MAX_CHUNK_WORD_SIZE);
 274 
 275   if (free_words() >= chklvl::MAX_CHUNK_WORD_SIZE) {
 276 
 277     MetaWord* loc = _base + _used_words;
 278     _used_words += chklvl::MAX_CHUNK_WORD_SIZE;
 279 
 280     // Create a new chunk tree for that new root node.
 281     ChunkTree* tree = _chunk_tree_array.get_tree_by_address(loc);
 282 
 283     // Create a root chunk header and initialize it;
 284     Metachunk* c = tree->alloc_root_chunk_header();
 285 
 286     // Wire it to the memory.
 287     c->set_base(loc);
 288 
 289     DEBUG_ONLY(c->verify(true);)
 290     return c;
 291 
 292   }
 293 
 294   return NULL; // Node is full.
 295 

 296 }
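Because allocate_root_chunk() hands back reserved-only memory, a caller has to commit the part of the chunk it intends to use before touching it. A plausible call sequence, sketched from the functions defined in this file; `node` and `needed_words` are hypothetical caller-side names and the real call chain in the patch may differ:

    // Caller already holds MetaspaceExpand_lock (both functions assert it).
    Metachunk* c = node->allocate_root_chunk();
    if (c != NULL) {
      // Commit the first needed_words words; fails only if a commit limit is hit.
      if (!node->ensure_range_is_committed(c->base(), needed_words)) {
        // ... back out or raise the GC threshold before retrying.
      }
    }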
 297 
 298 Metachunk* VirtualSpaceNode::split(chklvl_t target_level, Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]) {
 299 
 300   assert_lock_strong(MetaspaceExpand_lock);



 301 
 302   // Get the tree associated with this chunk and let it handle the splitting
 303   ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base());
 304   return tree->split(target_level, c, splinters);
 305 
 306 }


 307 
 308 Metachunk* VirtualSpaceNode::merge(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]) {
 309 
 310   assert_lock_strong(MetaspaceExpand_lock);

 311 
 312   // Get the tree associated with this chunk and let it handle the merging
 313   ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base());
 314   return tree->merge(c, num_merged);
 315 

 316 }
 317 
 318 void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {




 319 
 320   st->print("node @" PTR_FORMAT ": ", p2i(this));
 321   st->print("reserved=");
 322   print_scaled_words(st, word_size(), scale);
 323   st->print(", committed=");
 324   print_scaled_words_and_percentage(st, committed_words(), word_size(), scale);
 325   st->print(", used=");
 326   print_scaled_words_and_percentage(st, used_words(), word_size(), scale);
 327 
 328   st->cr();
 329 
 330   st->print_cr("   [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
 331       p2i(base()), p2i(base() + used_words()), p2i(base() + word_size()));
 332 
 333   st->print("Tree/Chunk footprint: ");
 334   print_scaled_words(st, _chunk_tree_array.memory_footprint_words(), scale);
 335 
 336   st->cr();
 337 
 338 }
 339 
 340 
 341 #ifdef ASSERT
 342 // Verify counters and basic structure. Slow mode: verify all chunks in depth
 343 void VirtualSpaceNode::verify(bool slow) const {
 344 
 345   assert_lock_strong(MetaspaceExpand_lock);
 346 
 347   assert(base() != NULL, "Invalid base");
 348   assert_is_aligned(base(), chklvl::MAX_CHUNK_BYTE_SIZE);
 349   assert(used_words() < word_size(), "Sanity");
 350 
 351   // Since we only ever hand out root chunks from a vsnode, top should always be aligned
 352   // to root chunk size.
 353   assert_is_aligned(used_words(), chklvl::MAX_CHUNK_WORD_SIZE);
 354 
 355   _commit_mask.verify(slow);
 356   _chunk_tree_array.verify(slow);
 357 
 358 }

 359 
 360 // Returns sum of committed space, in words.
 361 size_t VirtualSpaceNode::committed_words() const {
 362   return _commit_mask.get_committed_size();
 363 }
 364 #endif
 365 
 366 
 367 } // namespace metaspace
 368 