/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaDebug.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "services/memoryService.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

#define assert_counter(expected_value, real_value, msg) \
  assert( (expected_value) == (real_value),             \
         "Counter mismatch (%s): expected " SIZE_FORMAT \
         ", but got: " SIZE_FORMAT ".", msg, expected_value, \
         real_value);
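// Example use (see verify_metrics_locked() below):
//   assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
// On mismatch, the assert message shows both the expected and the actual value.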

// SpaceManager methods

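// Rounds a requested size (in words) up to the smallest of the three fixed
// chunk sizes (specialized/small/medium) that can hold it. A request larger
// than the medium chunk size is returned unchanged and will be served by a
// humongous chunk.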
size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
  size_t chunk_sizes[] = {
      specialized_chunk_size(is_class_space),
      small_chunk_size(is_class_space),
      medium_chunk_size(is_class_space)
  };

  // Adjust up to one of the fixed chunk sizes ...
  for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
    if (requested <= chunk_sizes[i]) {
      return chunk_sizes[i];
    }
  }

  // ... or return the size as a humongous chunk.
  return requested;
}

size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
  return adjust_initial_chunk_size(requested, is_class());
}

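// Picks the initial chunk size for a new SpaceManager based on the kind of
// metaspace it serves: e.g. the boot class loader starts out with a large
// first chunk, while hidden-class and reflection loaders start with a
// specialized (smallest) chunk.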
size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
  size_t requested;

  if (is_class()) {
    switch (type) {
    case Metaspace::BootMetaspaceType:              requested = Metaspace::first_class_chunk_word_size(); break;
    case Metaspace::ClassMirrorHolderMetaspaceType: requested = ClassSpecializedChunk; break;
    case Metaspace::ReflectionMetaspaceType:        requested = ClassSpecializedChunk; break;
    default:                                        requested = ClassSmallChunk; break;
    }
  } else {
    switch (type) {
    case Metaspace::BootMetaspaceType:              requested = Metaspace::first_chunk_word_size(); break;
    case Metaspace::ClassMirrorHolderMetaspaceType: requested = SpecializedChunk; break;
    case Metaspace::ReflectionMetaspaceType:        requested = SpecializedChunk; break;
    default:                                        requested = SmallChunk; break;
    }
  }

  // Adjust to one of the fixed chunk sizes (unless humongous)
  const size_t adjusted = adjust_initial_chunk_size(requested);

  assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
         SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);

  return adjusted;
}

void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
        num_chunks_by_type(i), chunk_size_name(i));
  }

  chunk_manager()->locked_print_free_chunks(st);
}

size_t SpaceManager::calc_chunk_size(size_t word_size) {

  // Decide between a small chunk and a medium chunk.  Up to
  // small_chunk_limit small chunks can be allocated.
  // After that a medium chunk is preferred.
  size_t chunk_word_size;

  // Special case for hidden metadata space.
  // ClassMirrorHolder metadata space is usually small since it is used for
  // class loader data whose life cycle is governed by one class, such as a
  // non-strong hidden class or unsafe anonymous class. The majority are in the
  // 1K - 2K range and rarely above 4K (64-bit JVM).
  // Instead of jumping to SmallChunk after the initial chunk is exhausted,
  // keeping allocations coming from SpecializedChunk up to
  // anon_and_delegating_metadata_specialize_chunk_limit (4) reduces space
  // waste from 60+% to around 30%.
  if ((_space_type == Metaspace::ClassMirrorHolderMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
      _mdtype == Metaspace::NonClassType &&
      num_chunks_by_type(SpecializedIndex) < anon_and_delegating_metadata_specialize_chunk_limit &&
      word_size + Metachunk::overhead() <= SpecializedChunk) {
    return SpecializedChunk;
  }

  if (num_chunks_by_type(MediumIndex) == 0 &&
      num_chunks_by_type(SmallIndex) < small_chunk_limit) {
    chunk_word_size = (size_t) small_chunk_size();
    if (word_size + Metachunk::overhead() > small_chunk_size()) {
      chunk_word_size = medium_chunk_size();
    }
  } else {
    chunk_word_size = medium_chunk_size();
  }

  // Might still need a humongous chunk.  Enforce
  // humongous allocation sizes to be aligned up to
  // the smallest chunk size.
  size_t if_humongous_sized_chunk =
    align_up(word_size + Metachunk::overhead(),
             smallest_chunk_size());
  chunk_word_size =
    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);

  assert(!SpaceManager::is_humongous(word_size) ||
         chunk_word_size == if_humongous_sized_chunk,
         "Size calculation is wrong, word_size " SIZE_FORMAT
         " chunk_word_size " SIZE_FORMAT,
         word_size, chunk_word_size);
  Log(gc, metaspace, alloc) log;
  if (log.is_trace() && SpaceManager::is_humongous(word_size)) {
    log.trace("Metadata humongous allocation:");
    log.trace("  word_size " PTR_FORMAT, word_size);
    log.trace("  chunk_word_size " PTR_FORMAT, chunk_word_size);
    log.trace("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
  }
  return chunk_word_size;
}

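// Forwards the current usage numbers to MemoryService (and, for class space
// managers, to the compressed class space pool as well). Skipped until VM
// initialization has completed, presumably because the memory pools are not
// fully set up before that point.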
void SpaceManager::track_metaspace_memory_usage() {
  if (is_init_completed()) {
    if (is_class()) {
      MemoryService::track_compressed_class_memory_usage();
    }
    MemoryService::track_metaspace_memory_usage();
  }
}

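// Slow path of allocate_work(): called with the SpaceManager's lock held when
// the current chunk cannot satisfy word_size. Takes MetaspaceExpand_lock,
// fetches a new chunk (from the chunk manager's free list or, failing that,
// the virtual space list), adds it to the in-use list and allocates from it.
// Returns NULL if no chunk could be obtained.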
MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  assert_lock_strong(_lock);
  assert(vs_list()->current_virtual_space() != NULL,
         "Should have been set");
  assert(current_chunk() == NULL ||
         current_chunk()->allocate(word_size) == NULL,
         "Don't need to expand");
  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
    size_t words_left = 0;
    size_t words_used = 0;
    if (current_chunk() != NULL) {
      words_left = current_chunk()->free_word_size();
      words_used = current_chunk()->used_word_size();
    }
    log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
                                       word_size, words_used, words_left);
  }

  // Get another chunk
  size_t chunk_word_size = calc_chunk_size(word_size);
  Metachunk* next = get_new_chunk(chunk_word_size);

  MetaWord* mem = NULL;

  // If a chunk was available, add it to the in-use chunk list
  // and do an allocation from it.
  if (next != NULL) {
    // Add to this manager's list of chunks in use.
    // If the new chunk is humongous, it was created to serve a single large allocation. In that
    // case it usually makes no sense to make it the current chunk, since the next allocation would
    // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
    // good chunk which could be used for more normal allocations.
    bool make_current = true;
    if (next->get_chunk_type() == HumongousIndex &&
        current_chunk() != NULL) {
      make_current = false;
    }
    add_chunk(next, make_current);
    mem = next->allocate(word_size);
  }

  // Track metaspace memory usage statistic.
  track_metaspace_memory_usage();

  return mem;
}

void SpaceManager::print_on(outputStream* st) const {
  SpaceManagerStatistics stat;
  add_to_statistics(&stat); // will lock _lock.
  stat.print_on(st, 1*K, false);
}

SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
                           Metaspace::MetaspaceType space_type,
                           Mutex* lock) :
  _lock(lock),
  _mdtype(mdtype),
  _space_type(space_type),
  _chunk_list(NULL),
  _current_chunk(NULL),
  _overhead_words(0),
  _capacity_words(0),
  _used_words(0),
  _block_freelists(NULL) {
  Metadebug::init_allocation_fail_alot_count();
  memset(_num_chunks_by_type, 0, sizeof(_num_chunks_by_type));
  log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
}

void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) {

  assert_lock_strong(MetaspaceExpand_lock);

  _capacity_words += new_chunk->word_size();
  _overhead_words += Metachunk::overhead();
  DEBUG_ONLY(new_chunk->verify());
  _num_chunks_by_type[new_chunk->get_chunk_type()] ++;

  // Adjust global counters:
  MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size());
  MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead());
}

void SpaceManager::account_for_allocation(size_t words) {
  // Note: we should be locked with the ClassLoaderData-specific metaspace lock.
  // We may or may not be locked with the global metaspace expansion lock.
  assert_lock_strong(lock());

  // Add to the per SpaceManager totals. This can be done non-atomically.
  _used_words += words;

  // Adjust global counters. This will be done atomically.
  MetaspaceUtils::inc_used(mdtype(), words);
}

void SpaceManager::account_for_spacemanager_death() {

  assert_lock_strong(MetaspaceExpand_lock);

  MetaspaceUtils::dec_capacity(mdtype(), _capacity_words);
  MetaspaceUtils::dec_overhead(mdtype(), _overhead_words);
  MetaspaceUtils::dec_used(mdtype(), _used_words);
}

SpaceManager::~SpaceManager() {

  // This takes this->_lock, which can't be done while holding MetaspaceExpand_lock.
  DEBUG_ONLY(verify_metrics());

  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  account_for_spacemanager_death();

  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
    ResourceMark rm;
    LogStream ls(log.trace());
    locked_print_chunks_in_use_on(&ls);
    if (block_freelists() != NULL) {
      block_freelists()->print_on(&ls);
    }
  }

  // Add all the chunks in use by this space manager
  // to the global list of free chunks.

  // Follow each list of chunks-in-use and add them to the
  // free lists.  Each list is NULL terminated.
  chunk_manager()->return_chunk_list(chunk_list());
#ifdef ASSERT
  _chunk_list = NULL;
  _current_chunk = NULL;
#endif

#ifdef ASSERT
  EVERY_NTH(VerifyMetaspaceInterval)
    chunk_manager()->locked_verify(true);
  END_EVERY_NTH
#endif

  if (_block_freelists != NULL) {
    delete _block_freelists;
  }
}

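// Returns a block of metadata to this SpaceManager for later reuse. The block
// is kept in a per-manager block free list, from which allocate() may serve
// subsequent requests.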
void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  assert_lock_strong(lock());
  // Allocations and deallocations are in raw_word_size
  size_t raw_word_size = get_allocation_word_size(word_size);
  // Lazily create a block_freelist
  if (block_freelists() == NULL) {
    _block_freelists = new BlockFreelist();
  }
  block_freelists()->return_block(p, raw_word_size);
  DEBUG_ONLY(Atomic::inc(&(g_internal_statistics.num_deallocs)));
}

// Adds a chunk to the list of chunks in use.
void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {

  assert_lock_strong(_lock);
  assert(new_chunk != NULL, "Should not be NULL");
  assert(new_chunk->next() == NULL, "Should not be on a list");

  new_chunk->reset_empty();

  // Find the correct list and set the current
  // chunk for that list.
  ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());

  if (make_current) {
    // If we are to make the chunk current, retire the old current chunk and replace
    // it with the new chunk.
    retire_current_chunk();
    set_current_chunk(new_chunk);
  }

  // Add the new chunk at the head of its respective chunk list.
  new_chunk->set_next(_chunk_list);
  _chunk_list = new_chunk;

  // Adjust counters.
  account_for_new_chunk(new_chunk);

  assert(new_chunk->is_empty(), "Not ready for reuse");
  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("SpaceManager::added chunk: ");
    ResourceMark rm;
    LogStream ls(log.trace());
    new_chunk->print_on(&ls);
    chunk_manager()->locked_print_free_chunks(&ls);
  }
}

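// Retires the current chunk before it is replaced: any leftover space that is
// still large enough to be tracked is handed to the block free list via
// deallocate(), so it can be reused instead of being wasted. Since that space
// leaves the chunk, it is booked as used.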
void SpaceManager::retire_current_chunk() {
  if (current_chunk() != NULL) {
    size_t remaining_words = current_chunk()->free_word_size();
    if (remaining_words >= SmallBlocks::small_block_min_size()) {
      MetaWord* ptr = current_chunk()->allocate(remaining_words);
      deallocate(ptr, remaining_words);
      account_for_allocation(remaining_words);
    }
  }
}

Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);

  if (next == NULL) {
    next = vs_list()->get_new_chunk(chunk_word_size,
                                    medium_chunk_bunch());
  }

  Log(gc, metaspace, alloc) log;
  if (log.is_trace() && next != NULL &&
      SpaceManager::is_humongous(next->word_size())) {
    log.trace("  new humongous chunk word size " PTR_FORMAT, next->word_size());
  }

  return next;
}

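// Allocates word_size words of metadata. First tries the free list of
// deallocated blocks (once it has grown beyond allocation_from_dictionary_limit,
// since searching the dictionary is comparatively expensive), then falls back
// to allocate_work(), which carves memory out of the current chunk or grows
// the chunk list.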
MetaWord* SpaceManager::allocate(size_t word_size) {
  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t raw_word_size = get_allocation_word_size(word_size);
  BlockFreelist* fl = block_freelists();
  MetaWord* p = NULL;

  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size.  Don't allocate
  // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  // for allocations.  Do some profiling.  JJJ
  if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
    p = fl->get_block(raw_word_size);
    if (p != NULL) {
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
    }
  }
  if (p == NULL) {
    p = allocate_work(raw_word_size);
  }

#ifdef ASSERT
  EVERY_NTH(VerifyMetaspaceInterval)
    verify_metrics_locked();
  END_EVERY_NTH
#endif

  return p;
}

// Returns the address of the space allocated for "word_size".
// This method does not know about blocks (Metablocks).
MetaWord* SpaceManager::allocate_work(size_t word_size) {
  assert_lock_strong(lock());
#ifdef ASSERT
  if (Metadebug::test_metadata_failure()) {
    return NULL;
  }
#endif
  // Is there space in the current chunk?
  MetaWord* result = NULL;

  if (current_chunk() != NULL) {
    result = current_chunk()->allocate(word_size);
  }

  if (result == NULL) {
    result = grow_and_allocate(word_size);
  }

  if (result != NULL) {
    account_for_allocation(word_size);
  }

  return result;
}

void SpaceManager::verify() {
  Metachunk* curr = chunk_list();
  while (curr != NULL) {
    DEBUG_ONLY(do_verify_chunk(curr);)
    assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
    curr = curr->next();
  }
}

void SpaceManager::verify_chunk_size(Metachunk* chunk) {
  assert(is_humongous(chunk->word_size()) ||
         chunk->word_size() == medium_chunk_size() ||
         chunk->word_size() == small_chunk_size() ||
         chunk->word_size() == specialized_chunk_size(),
         "Chunk size is wrong");
  return;
}

void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
  assert_lock_strong(lock());
  Metachunk* chunk = chunk_list();
  while (chunk != NULL) {
    UsedChunksStatistics& chunk_stat = out->chunk_stats(chunk->get_chunk_type());
    chunk_stat.add_num(1);
    chunk_stat.add_cap(chunk->word_size());
    chunk_stat.add_overhead(Metachunk::overhead());
    chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead());
    if (chunk != current_chunk()) {
      chunk_stat.add_waste(chunk->free_word_size());
    } else {
      chunk_stat.add_free(chunk->free_word_size());
    }
    chunk = chunk->next();
  }
  if (block_freelists() != NULL) {
    out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size());
  }
}

void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
  add_to_statistics_locked(out);
}

#ifdef ASSERT
void SpaceManager::verify_metrics_locked() const {
  assert_lock_strong(lock());

  SpaceManagerStatistics stat;
  add_to_statistics_locked(&stat);

  UsedChunksStatistics chunk_stats = stat.totals();

  DEBUG_ONLY(chunk_stats.check_sanity());

  assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");
  assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
  assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words");
}

void SpaceManager::verify_metrics() const {
  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
  verify_metrics_locked();
}
#endif // ASSERT


} // namespace metaspace