/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaDebug.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "services/memoryService.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {
namespace internals {
#define assert_counter(expected_value, real_value, msg) \
  assert( (expected_value) == (real_value),             \
         "Counter mismatch (%s): expected " SIZE_FORMAT \
         ", but got: " SIZE_FORMAT ".", msg, expected_value, \
         real_value);
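
// Usage sketch (illustrative; mirrors the checks in verify_metrics_locked()
// below) -- compare a locally maintained counter against a freshly computed
// value and fail with a formatted message on mismatch:
//
//   assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");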

// SpaceManager methods

size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
  size_t chunk_sizes[] = {
      specialized_chunk_size(is_class_space),
      small_chunk_size(is_class_space),
      medium_chunk_size(is_class_space)
  };

  // Adjust up to one of the fixed chunk sizes ...
  for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
    if (requested <= chunk_sizes[i]) {
      return chunk_sizes[i];
    }
  }

  // ... or return the size as a humongous chunk.
  return requested;
}
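
// Worked example (illustrative; assumes the usual 64-bit chunk size constants
// of 128 words specialized, 512 words small and 8 * K words medium for the
// non-class space -- the actual values come from specialized_chunk_size() etc.):
//
//   adjust_initial_chunk_size(100,    false) -> 128     (rounded up to specialized)
//   adjust_initial_chunk_size(512,    false) -> 512     (exact small-chunk fit)
//   adjust_initial_chunk_size(600,    false) -> 8 * K   (rounded up to medium)
//   adjust_initial_chunk_size(10 * K, false) -> 10 * K  (larger than medium:
//                                                        humongous, returned as-is)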

size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
  return adjust_initial_chunk_size(requested, is_class());
}

size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
  size_t requested;

  if (is_class()) {
    switch (type) {
    case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
    case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
    case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
    default:                                 requested = ClassSmallChunk; break;
    }
  } else {
    switch (type) {
    case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
    case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
    case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
    default:                                 requested = SmallChunk; break;
    }
  }

  // Adjust to one of the fixed chunk sizes (unless humongous)
  const size_t adjusted = adjust_initial_chunk_size(requested);

  assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
         SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);

  return adjusted;
}

void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
        num_chunks_by_type(i), chunk_size_name(i));
  }

  chunk_manager()->locked_print_free_chunks(st);
}

size_t SpaceManager::calc_chunk_size(size_t word_size) {

  // Decide between a small chunk and a medium chunk.  Up to
  // small_chunk_limit small chunks can be allocated.
  // After that a medium chunk is preferred.
  size_t chunk_word_size;

  // Special case for anonymous metadata space.
  // Anonymous metadata space is usually small, with the majority within the
  // 1K - 2K range and rarely above 4K (64-bit JVM).
  // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping
  // allocations at SpecializedChunk size up to
  // anon_and_delegating_metadata_specialize_chunk_limit (4) chunks
  // reduces space waste from 60+% to around 30%.
  if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
      _mdtype == Metaspace::NonClassType &&
      num_chunks_by_type(SpecializedIndex) < anon_and_delegating_metadata_specialize_chunk_limit &&
      word_size + Metachunk::overhead() <= SpecializedChunk) {
    return SpecializedChunk;
  }

  if (num_chunks_by_type(MediumIndex) == 0 &&
      num_chunks_by_type(SmallIndex) < small_chunk_limit) {
    chunk_word_size = (size_t) small_chunk_size();
    if (word_size + Metachunk::overhead() > small_chunk_size()) {
      chunk_word_size = medium_chunk_size();
    }
  } else {
    chunk_word_size = medium_chunk_size();
  }

  // Might still need a humongous chunk.  Enforce
  // humongous allocation sizes to be aligned up to
  // the smallest chunk size.
  size_t if_humongous_sized_chunk =
    align_up(word_size + Metachunk::overhead(),
             smallest_chunk_size());
  chunk_word_size =
    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);

  assert(!SpaceManager::is_humongous(word_size) ||
         chunk_word_size == if_humongous_sized_chunk,
         "Size calculation is wrong, word_size " SIZE_FORMAT
         " chunk_word_size " SIZE_FORMAT,
         word_size, chunk_word_size);
  Log(gc, metaspace, alloc) log;
  if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
    log.debug("Metadata humongous allocation:");
    log.debug("  word_size " SIZE_FORMAT, word_size);
    log.debug("  chunk_word_size " SIZE_FORMAT, chunk_word_size);
    log.debug("    chunk overhead " SIZE_FORMAT, Metachunk::overhead());
  }
  return chunk_word_size;
}
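
// Chunk size progression sketch (illustrative; assumes a non-class, non-anonymous
// loader and the usual constants small_chunk_limit == 4, small_chunk_size() == 512
// and medium_chunk_size() == 8 * K words). For a stream of small allocations the
// manager hands out a few small chunks, then switches to medium chunks:
//
//   while num_chunks_by_type(SmallIndex) < small_chunk_limit:
//                                calc_chunk_size(100) == 512
//   afterwards:                  calc_chunk_size(100) == 8 * K
//
// A single oversized request always gets its own humongous chunk:
//
//   calc_chunk_size(20 * K) == align_up(20 * K + Metachunk::overhead(),
//                                       smallest_chunk_size())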

void SpaceManager::track_metaspace_memory_usage() {
  if (is_init_completed()) {
    if (is_class()) {
      MemoryService::track_compressed_class_memory_usage();
    }
    MemoryService::track_metaspace_memory_usage();
  }
}

MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  assert_lock_strong(_lock);
  assert(vs_list()->current_virtual_space() != NULL,
         "Should have been set");
  assert(current_chunk() == NULL ||
         current_chunk()->allocate(word_size) == NULL,
         "Don't need to expand");
  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
    size_t words_left = 0;
    size_t words_used = 0;
    if (current_chunk() != NULL) {
      words_left = current_chunk()->free_word_size();
      words_used = current_chunk()->used_word_size();
    }
    log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
                                       word_size, words_used, words_left);
  }

  // Get another chunk
  size_t chunk_word_size = calc_chunk_size(word_size);
  Metachunk* next = get_new_chunk(chunk_word_size);

  MetaWord* mem = NULL;

  // If a chunk was available, add it to the in-use chunk list
  // and do an allocation from it.
  if (next != NULL) {
    // Add to this manager's list of chunks in use.
    // If the new chunk is humongous, it was created to serve a single large allocation. In that
    // case it usually makes no sense to make it the current chunk, since the next allocation would
    // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
    // good chunk which could be used for more normal allocations.
    bool make_current = true;
    if (next->get_chunk_type() == HumongousIndex &&
        current_chunk() != NULL) {
      make_current = false;
    }
    add_chunk(next, make_current);
    mem = next->allocate(word_size);
  }

  // Track metaspace memory usage statistic.
  track_metaspace_memory_usage();

  return mem;
}
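
// Example of the make_current decision above (illustrative): if a loader whose
// current chunk still has free room triggers a 100 * K-word humongous allocation,
// the humongous chunk serves only that one request and is not made current, so
// subsequent ordinary allocations keep filling the existing current chunk.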

void SpaceManager::print_on(outputStream* st) const {
  SpaceManagerStatistics stat;
  add_to_statistics(&stat); // will lock _lock.
  stat.print_on(st, 1*K, false);
}

SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
                           Metaspace::MetaspaceType space_type,
                           Mutex* lock) :
  _mdtype(mdtype),
  _space_type(space_type),
  _capacity_words(0),
  _used_words(0),
  _overhead_words(0),
  _block_freelists(NULL),
  _lock(lock),
  _chunk_list(NULL),
  _current_chunk(NULL)
{
  Metadebug::init_allocation_fail_alot_count();
  memset(_num_chunks_by_type, 0, sizeof(_num_chunks_by_type));
  log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
}

void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) {

  assert_lock_strong(MetaspaceExpand_lock);

  _capacity_words += new_chunk->word_size();
  _overhead_words += Metachunk::overhead();
  DEBUG_ONLY(new_chunk->verify());
  _num_chunks_by_type[new_chunk->get_chunk_type()] ++;

  // Adjust global counters:
  MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size());
  MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead());
}

void SpaceManager::account_for_allocation(size_t words) {
  // Note: we should be locked with the ClassloaderData-specific metaspace lock.
  // We may or may not be locked with the global metaspace expansion lock.
  assert_lock_strong(lock());

  // Add to the per SpaceManager totals. This can be done non-atomically.
  _used_words += words;

  // Adjust global counters. This will be done atomically.
  MetaspaceUtils::inc_used(mdtype(), words);
}

void SpaceManager::account_for_spacemanager_death() {

  assert_lock_strong(MetaspaceExpand_lock);

  MetaspaceUtils::dec_capacity(mdtype(), _capacity_words);
  MetaspaceUtils::dec_overhead(mdtype(), _overhead_words);
  MetaspaceUtils::dec_used(mdtype(), _used_words);
}

SpaceManager::~SpaceManager() {

  // This call takes this->_lock, which can't be done while holding the MetaspaceExpand_lock.
  DEBUG_ONLY(verify_metrics());

  MutexLockerEx fcl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);

  chunk_manager()->slow_locked_verify();

  account_for_spacemanager_death();

  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
    ResourceMark rm;
    LogStream ls(log.trace());
    locked_print_chunks_in_use_on(&ls);
    if (block_freelists() != NULL) {
      block_freelists()->print_on(&ls);
    }
  }

  // Return all the chunks in use by this space manager to the global
  // chunk manager's free lists. The chunk list is NULL terminated.
  chunk_manager()->return_chunk_list(chunk_list());
#ifdef ASSERT
  _chunk_list = NULL;
  _current_chunk = NULL;
#endif

  chunk_manager()->slow_locked_verify();

  if (_block_freelists != NULL) {
    delete _block_freelists;
  }
}

void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  assert_lock_strong(lock());
  // Allocations and deallocations are in raw_word_size
  size_t raw_word_size = get_allocation_word_size(word_size);
  // Lazily create a block_freelist
  if (block_freelists() == NULL) {
    _block_freelists = new BlockFreelist();
  }
  block_freelists()->return_block(p, raw_word_size);
  DEBUG_ONLY(Atomic::inc(&(g_internal_statistics.num_deallocs)));
}
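
// Deallocation sketch (illustrative; callers hold the SpaceManager lock, and
// "sm" is a hypothetical SpaceManager*): metaspace is only reclaimed wholesale
// when the owning class loader dies, so deallocate() parks the block in the
// per-manager BlockFreelist instead. allocate() below consults that freelist
// only once it holds more than allocation_from_dictionary_limit words:
//
//   MetaWord* p = sm->allocate(word_size);  // carved from the current chunk
//   sm->deallocate(p, word_size);           // block goes to the BlockFreelist
//   MetaWord* q = sm->allocate(word_size);  // may reuse p's block once the
//                                           // freelist is fat enough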

// Adds a chunk to the list of chunks in use.
void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {

  assert_lock_strong(_lock);
  assert(new_chunk != NULL, "Should not be NULL");
  assert(new_chunk->next() == NULL, "Should not be on a list");

  new_chunk->reset_empty();

  // Find the correct list and set the current
  // chunk for that list.
  ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());

  if (make_current) {
    // If we are to make the chunk current, retire the old current chunk and replace
    // it with the new chunk.
    retire_current_chunk();
    set_current_chunk(new_chunk);
  }

  // Add the new chunk at the head of its respective chunk list.
  new_chunk->set_next(_chunk_list);
  _chunk_list = new_chunk;

  // Adjust counters.
  account_for_new_chunk(new_chunk);

  assert(new_chunk->is_empty(), "Not ready for reuse");
  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("SpaceManager::added chunk: ");
    ResourceMark rm;
    LogStream ls(log.trace());
    new_chunk->print_on(&ls);
    chunk_manager()->locked_print_free_chunks(&ls);
  }
}

void SpaceManager::retire_current_chunk() {
  if (current_chunk() != NULL) {
    size_t remaining_words = current_chunk()->free_word_size();
    if (remaining_words >= SmallBlocks::small_block_min_size()) {
      MetaWord* ptr = current_chunk()->allocate(remaining_words);
      deallocate(ptr, remaining_words);
      account_for_allocation(remaining_words);
    }
  }
}
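
// Note on retire_current_chunk() (illustrative numbers): the leftover space is
// first formally allocated from the chunk -- hence the account_for_allocation()
// call -- and then immediately handed to deallocate(), which files it in the
// BlockFreelist for later reuse. E.g. a current chunk with 40 free words yields
// one 40-word block in the freelist; remainders smaller than
// SmallBlocks::small_block_min_size() are simply left behind as waste.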

Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);

  if (next == NULL) {
    next = vs_list()->get_new_chunk(chunk_word_size,
                                    medium_chunk_bunch());
  }

  Log(gc, metaspace, alloc) log;
  if (log.is_debug() && next != NULL &&
      SpaceManager::is_humongous(next->word_size())) {
    log.debug("  new humongous chunk word size " SIZE_FORMAT, next->word_size());
  }

  return next;
}

MetaWord* SpaceManager::allocate(size_t word_size) {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t raw_word_size = get_allocation_word_size(word_size);
  BlockFreelist* fl = block_freelists();
  MetaWord* p = NULL;

  DEBUG_ONLY(if (VerifyMetaspace) verify_metrics_locked());

  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size.  Don't allocate
  // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  // for allocations.  Do some profiling.  JJJ
  if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
    p = fl->get_block(raw_word_size);
    if (p != NULL) {
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
    }
  }
  if (p == NULL) {
    p = allocate_work(raw_word_size);
  }

  return p;
}
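
// Allocation path sketch (illustrative; "sm" is a hypothetical SpaceManager*
// for non-class metadata, and sizes assume the usual 64-bit constants):
//
//   MetaWord* p = sm->allocate(128);      // freelist miss -> allocate_work()
//                                         //   -> current_chunk()->allocate()
//   MetaWord* q = sm->allocate(100 * K);  // too big for any fixed chunk size
//                                         //   -> grow_and_allocate() fetches a
//                                         //      dedicated humongous chunk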

// Returns the address of space allocated for "word_size".
// This method does not know about blocks (Metablocks).
MetaWord* SpaceManager::allocate_work(size_t word_size) {
  assert_lock_strong(lock());
#ifdef ASSERT
  if (Metadebug::test_metadata_failure()) {
    return NULL;
  }
#endif
  // Is there space in the current chunk?
  MetaWord* result = NULL;

  if (current_chunk() != NULL) {
    result = current_chunk()->allocate(word_size);
  }

  if (result == NULL) {
    result = grow_and_allocate(word_size);
  }

  if (result != NULL) {
    account_for_allocation(word_size);
  }

  return result;
}

void SpaceManager::verify() {
  Metachunk* curr = chunk_list();
  while (curr != NULL) {
    DEBUG_ONLY(do_verify_chunk(curr);)
    assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
    curr = curr->next();
  }
}

void SpaceManager::verify_chunk_size(Metachunk* chunk) {
  assert(is_humongous(chunk->word_size()) ||
         chunk->word_size() == medium_chunk_size() ||
         chunk->word_size() == small_chunk_size() ||
         chunk->word_size() == specialized_chunk_size(),
         "Chunk size is wrong");
  return;
}

void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
  assert_lock_strong(lock());
  Metachunk* chunk = chunk_list();
  while (chunk != NULL) {
    UsedChunksStatistics& chunk_stat = out->chunk_stats(chunk->get_chunk_type());
    chunk_stat.add_num(1);
    chunk_stat.add_cap(chunk->word_size());
    chunk_stat.add_overhead(Metachunk::overhead());
    chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead());
    if (chunk != current_chunk()) {
      chunk_stat.add_waste(chunk->free_word_size());
    } else {
      chunk_stat.add_free(chunk->free_word_size());
    }
    chunk = chunk->next();
  }
  if (block_freelists() != NULL) {
    out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size());
  }
}

void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  add_to_statistics_locked(out);
}

#ifdef ASSERT
void SpaceManager::verify_metrics_locked() const {
  assert_lock_strong(lock());

  SpaceManagerStatistics stat;
  add_to_statistics_locked(&stat);

  UsedChunksStatistics chunk_stats = stat.totals();

  DEBUG_ONLY(chunk_stats.check_sanity());

  assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");
  assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
  assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words");
}

void SpaceManager::verify_metrics() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  verify_metrics_locked();
}
#endif // ASSERT


} // namespace internals
} // namespace metaspace