/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaDebug.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "services/memoryService.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

#define assert_counter(expected_value, real_value, msg) \
  assert( (expected_value) == (real_value),             \
         "Counter mismatch (%s): expected " SIZE_FORMAT \
         ", but got: " SIZE_FORMAT ".", msg, expected_value, \
         real_value);

// SpaceManager methods

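// Round a requested chunk size up to the smallest fixed chunk size
// (specialized, small or medium) that can hold it. Requests larger than
// the medium chunk size are returned unchanged and will be treated as
// humongous chunks.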
size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
  size_t chunk_sizes[] = {
      specialized_chunk_size(is_class_space),
      small_chunk_size(is_class_space),
      medium_chunk_size(is_class_space)
  };

  // Adjust up to one of the fixed chunk sizes ...
  for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
    if (requested <= chunk_sizes[i]) {
      return chunk_sizes[i];
    }
  }

  // ... or return the size as a humongous chunk.
  return requested;
}

size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
  return adjust_initial_chunk_size(requested, is_class());
}

size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
  size_t requested;

  if (is_class()) {
    switch (type) {
    case Metaspace::BootMetaspaceType:       requested = Metaspace::first_class_chunk_word_size(); break;
    case Metaspace::AnonymousMetaspaceType:  requested = ClassSpecializedChunk; break;
    case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
    default:                                 requested = ClassSmallChunk; break;
    }
  } else {
    switch (type) {
    case Metaspace::BootMetaspaceType:       requested = Metaspace::first_chunk_word_size(); break;
    case Metaspace::AnonymousMetaspaceType:  requested = SpecializedChunk; break;
    case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
    default:                                 requested = SmallChunk; break;
    }
  }

  // Adjust to one of the fixed chunk sizes (unless humongous)
  const size_t adjusted = adjust_initial_chunk_size(requested);

  assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
         SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);

  return adjusted;
}

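// Print, for each chunk type, the number of chunks this SpaceManager holds,
// followed by the chunk manager's free chunk statistics. As the "locked_"
// prefix indicates, the caller is expected to hold the necessary locks.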
void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {

  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
        num_chunks_by_type(i), chunk_size_name(i));
  }

  chunk_manager()->locked_print_free_chunks(st);
}

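// Calculate the size of the chunk to request next, based on the type of
// this metaspace and on how many chunks of each type are already in use.
// For humongous allocations the returned size includes the Metachunk
// overhead, aligned up to the smallest chunk size.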
size_t SpaceManager::calc_chunk_size(size_t word_size) {

  // Decide between a small chunk and a medium chunk.  Up to
  // small_chunk_limit small chunks can be allocated.
  // After that a medium chunk is preferred.
  size_t chunk_word_size;

  // Special case for anonymous metadata space.
  // Anonymous metadata space is usually small, with the majority within the 1K - 2K range and
  // rarely above 4K (64-bit JVM).
  // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping allocation
  // from SpecializedChunk up to anon_and_delegating_metadata_specialize_chunk_limit (4)
  // reduces space waste from 60+% to around 30%.
  if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
      _mdtype == Metaspace::NonClassType &&
      num_chunks_by_type(SpecializedIndex) < anon_and_delegating_metadata_specialize_chunk_limit &&
      word_size + Metachunk::overhead() <= SpecializedChunk) {
    return SpecializedChunk;
  }

  if (num_chunks_by_type(MediumIndex) == 0 &&
      num_chunks_by_type(SmallIndex) < small_chunk_limit) {
    chunk_word_size = (size_t) small_chunk_size();
    if (word_size + Metachunk::overhead() > small_chunk_size()) {
      chunk_word_size = medium_chunk_size();
    }
  } else {
    chunk_word_size = medium_chunk_size();
  }

  // Might still need a humongous chunk.  Enforce
  // humongous allocation sizes to be aligned up to
  // the smallest chunk size.
  size_t if_humongous_sized_chunk =
    align_up(word_size + Metachunk::overhead(),
             smallest_chunk_size());
  chunk_word_size =
    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);

  assert(!SpaceManager::is_humongous(word_size) ||
         chunk_word_size == if_humongous_sized_chunk,
         "Size calculation is wrong, word_size " SIZE_FORMAT
         " chunk_word_size " SIZE_FORMAT,
         word_size, chunk_word_size);
  Log(gc, metaspace, alloc) log;
  if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
    log.debug("Metadata humongous allocation:");
    log.debug("  word_size " SIZE_FORMAT, word_size);
    log.debug("  chunk_word_size " SIZE_FORMAT, chunk_word_size);
    log.debug("    chunk overhead " SIZE_FORMAT, Metachunk::overhead());
  }
  return chunk_word_size;
}

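// Propagate usage to the MemoryService so the metaspace (and, for class
// space, the compressed class space) memory pools stay up to date. Only
// done once VM initialization has completed.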
void SpaceManager::track_metaspace_memory_usage() {
  if (is_init_completed()) {
    if (is_class()) {
      MemoryService::track_compressed_class_memory_usage();
    }
    MemoryService::track_metaspace_memory_usage();
  }
}

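// Slow path of allocation: called when the current chunk could not satisfy
// the request. Acquires a new chunk (sized via calc_chunk_size()), adds it
// to the in-use list and allocates from it. Returns NULL if no chunk could
// be obtained.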
MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  assert_lock_strong(_lock);
  assert(vs_list()->current_virtual_space() != NULL,
         "Should have been set");
  assert(current_chunk() == NULL ||
         current_chunk()->allocate(word_size) == NULL,
         "Don't need to expand");
  MutexLockerEx cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
    size_t words_left = 0;
    size_t words_used = 0;
    if (current_chunk() != NULL) {
      words_left = current_chunk()->free_word_size();
      words_used = current_chunk()->used_word_size();
    }
    log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
                                       word_size, words_used, words_left);
  }

  // Get another chunk
  size_t chunk_word_size = calc_chunk_size(word_size);
  Metachunk* next = get_new_chunk(chunk_word_size);

  MetaWord* mem = NULL;

  // If a chunk was available, add it to the in-use chunk list
  // and do an allocation from it.
  if (next != NULL) {
    // Add to this manager's list of chunks in use.
    // If the new chunk is humongous, it was created to serve a single large allocation. In that
    // case it usually makes no sense to make it the current chunk, since the next allocation would
    // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
    // good chunk which could be used for more normal allocations.
    bool make_current = true;
    if (next->get_chunk_type() == HumongousIndex &&
        current_chunk() != NULL) {
      make_current = false;
    }
    add_chunk(next, make_current);
    mem = next->allocate(word_size);
  }

  // Track metaspace memory usage statistic.
  track_metaspace_memory_usage();

  return mem;
}

void SpaceManager::print_on(outputStream* st) const {
  SpaceManagerStatistics stat;
  add_to_statistics(&stat); // will lock _lock.
  stat.print_on(st, 1*K, false);
}

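// Note: a new SpaceManager starts out empty; its first chunk is only
// acquired lazily, on the first allocation request.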
SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
                           Metaspace::MetaspaceType space_type,
                           Mutex* lock) :
  _mdtype(mdtype),
  _space_type(space_type),
  _capacity_words(0),
  _used_words(0),
  _overhead_words(0),
  _block_freelists(NULL),
  _lock(lock),
  _chunk_list(NULL),
  _current_chunk(NULL)
{
  Metadebug::init_allocation_fail_alot_count();
  memset(_num_chunks_by_type, 0, sizeof(_num_chunks_by_type));
  log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
}

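// Bookkeeping for a newly acquired chunk: update this SpaceManager's
// capacity and overhead counters as well as the corresponding global
// counters. Caller must hold MetaspaceExpand_lock.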
void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) {

  assert_lock_strong(MetaspaceExpand_lock);

  _capacity_words += new_chunk->word_size();
  _overhead_words += Metachunk::overhead();
  DEBUG_ONLY(new_chunk->verify());
  _num_chunks_by_type[new_chunk->get_chunk_type()] ++;

  // Adjust global counters:
  MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size());
  MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead());
}

void SpaceManager::account_for_allocation(size_t words) {
  // Note: we should be locked with the ClassloaderData-specific metaspace lock.
  // We may or may not be locked with the global metaspace expansion lock.
  assert_lock_strong(lock());

  // Add to the per SpaceManager totals. This can be done non-atomically.
  _used_words += words;

  // Adjust global counters. This will be done atomically.
  MetaspaceUtils::inc_used(mdtype(), words);
}

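// Called from the destructor: remove this SpaceManager's contribution from
// the global capacity, overhead and usage counters.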
void SpaceManager::account_for_spacemanager_death() {

  assert_lock_strong(MetaspaceExpand_lock);

  MetaspaceUtils::dec_capacity(mdtype(), _capacity_words);
  MetaspaceUtils::dec_overhead(mdtype(), _overhead_words);
  MetaspaceUtils::dec_used(mdtype(), _used_words);
}

SpaceManager::~SpaceManager() {

  // This locks this->_lock, which cannot be done while holding MetaspaceExpand_lock.
  DEBUG_ONLY(verify_metrics());

  MutexLockerEx fcl(MetaspaceExpand_lock,
                    Mutex::_no_safepoint_check_flag);

  chunk_manager()->slow_locked_verify();

  account_for_spacemanager_death();

  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
    ResourceMark rm;
    LogStream ls(log.trace());
    locked_print_chunks_in_use_on(&ls);
    if (block_freelists() != NULL) {
      block_freelists()->print_on(&ls);
    }
  }

  // Add all the chunks in use by this space manager
  // to the global list of free chunks.

  // Follow each list of chunks-in-use and add them to the
  // free lists.  Each list is NULL terminated.
  chunk_manager()->return_chunk_list(chunk_list());
#ifdef ASSERT
  _chunk_list = NULL;
  _current_chunk = NULL;
#endif

  chunk_manager()->slow_locked_verify();

  if (_block_freelists != NULL) {
    delete _block_freelists;
  }
}

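// Take a block previously handed out by allocate() and return it to the
// (lazily created) block freelist, so it can be reused by later
// allocations from this SpaceManager.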
void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  assert_lock_strong(lock());
  // Allocations and deallocations are accounted in raw word size.
  size_t raw_word_size = get_allocation_word_size(word_size);
  // Lazily create a block_freelist
  if (block_freelists() == NULL) {
    _block_freelists = new BlockFreelist();
  }
  block_freelists()->return_block(p, raw_word_size);
  DEBUG_ONLY(Atomic::inc(&(g_internal_statistics.num_deallocs)));
}

// Adds a chunk to the list of chunks in use.
void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {

  assert_lock_strong(_lock);
  assert(new_chunk != NULL, "Should not be NULL");
  assert(new_chunk->next() == NULL, "Should not be on a list");

  new_chunk->reset_empty();

  // Find the correct list and set the current
  // chunk for that list.
  ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());

  if (make_current) {
    // If we are to make the chunk current, retire the old current chunk and replace
    // it with the new chunk.
    retire_current_chunk();
    set_current_chunk(new_chunk);
  }

  // Add the new chunk at the head of its respective chunk list.
  new_chunk->set_next(_chunk_list);
  _chunk_list = new_chunk;

  // Adjust counters.
  account_for_new_chunk(new_chunk);

  assert(new_chunk->is_empty(), "Not ready for reuse");
  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("SpaceManager::added chunk: ");
    ResourceMark rm;
    LogStream ls(log.trace());
    new_chunk->print_on(&ls);
    chunk_manager()->locked_print_free_chunks(&ls);
  }
}

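// Retire the current chunk: its remaining free space, if large enough to
// hold a small block, is handed to the block freelist via deallocate() so
// it is not lost when a new current chunk is installed.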
void SpaceManager::retire_current_chunk() {
  if (current_chunk() != NULL) {
    size_t remaining_words = current_chunk()->free_word_size();
    if (remaining_words >= SmallBlocks::small_block_min_size()) {
      MetaWord* ptr = current_chunk()->allocate(remaining_words);
      deallocate(ptr, remaining_words);
      account_for_allocation(remaining_words);
    }
  }
}

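// Acquire a new chunk of the given size: first try the chunk manager's
// freelist; if that fails, carve a fresh chunk out of the virtual space
// list.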
Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);

  if (next == NULL) {
    next = vs_list()->get_new_chunk(chunk_word_size,
                                    medium_chunk_bunch());
  }

  Log(gc, metaspace, alloc) log;
  if (log.is_debug() && next != NULL &&
      SpaceManager::is_humongous(next->word_size())) {
    log.debug("  new humongous chunk word size " SIZE_FORMAT, next->word_size());
  }

  return next;
}

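// Public allocation entry point. Tries the freelist of deallocated blocks
// first (once it has grown beyond allocation_from_dictionary_limit), then
// falls back to allocating from the current or a new chunk.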
MetaWord* SpaceManager::allocate(size_t word_size) {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t raw_word_size = get_allocation_word_size(word_size);
  BlockFreelist* fl = block_freelists();
  MetaWord* p = NULL;

  DEBUG_ONLY(if (VerifyMetaspace) verify_metrics_locked());

  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size.  Don't allocate
  // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  // for allocations.  Do some profiling.  JJJ
  if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
    p = fl->get_block(raw_word_size);
    if (p != NULL) {
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
    }
  }
  if (p == NULL) {
    p = allocate_work(raw_word_size);
  }

  return p;
}

// Returns the address of the space allocated for "word_size".
// This method does not know about blocks (Metablocks).
MetaWord* SpaceManager::allocate_work(size_t word_size) {
  assert_lock_strong(lock());
#ifdef ASSERT
  if (Metadebug::test_metadata_failure()) {
    return NULL;
  }
#endif
  // Is there space in the current chunk?
  MetaWord* result = NULL;

  if (current_chunk() != NULL) {
    result = current_chunk()->allocate(word_size);
  }

  if (result == NULL) {
    result = grow_and_allocate(word_size);
  }

  if (result != NULL) {
    account_for_allocation(word_size);
  }

  return result;
}

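// Walk the list of in-use chunks and verify each one; all of them must be
// tagged as in use, not free.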
void SpaceManager::verify() {
  Metachunk* curr = chunk_list();
  while (curr != NULL) {
    DEBUG_ONLY(do_verify_chunk(curr);)
    assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
    curr = curr->next();
  }
}

void SpaceManager::verify_chunk_size(Metachunk* chunk) {
  assert(is_humongous(chunk->word_size()) ||
         chunk->word_size() == medium_chunk_size() ||
         chunk->word_size() == small_chunk_size() ||
         chunk->word_size() == specialized_chunk_size(),
         "Chunk size is wrong");
}

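// Accumulate this SpaceManager's numbers into *out. Note the distinction
// between "free" and "waste": free space in the current chunk can still be
// used for allocations, while free space in any other chunk is effectively
// wasted, since those chunks are no longer allocated from.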
void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const {
  assert_lock_strong(lock());
  Metachunk* chunk = chunk_list();
  while (chunk != NULL) {
    UsedChunksStatistics& chunk_stat = out->chunk_stats(chunk->get_chunk_type());
    chunk_stat.add_num(1);
    chunk_stat.add_cap(chunk->word_size());
    chunk_stat.add_overhead(Metachunk::overhead());
    chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead());
    if (chunk != current_chunk()) {
      chunk_stat.add_waste(chunk->free_word_size());
    } else {
      chunk_stat.add_free(chunk->free_word_size());
    }
    chunk = chunk->next();
  }
  if (block_freelists() != NULL) {
    out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size());
  }
}

void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  add_to_statistics_locked(out);
}

#ifdef ASSERT
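// Recompute the statistics from the chunk list and cross-check them
// against the running counters (_capacity_words, _used_words,
// _overhead_words) kept by this SpaceManager.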
void SpaceManager::verify_metrics_locked() const {
  assert_lock_strong(lock());

  SpaceManagerStatistics stat;
  add_to_statistics_locked(&stat);

  UsedChunksStatistics chunk_stats = stat.totals();

  DEBUG_ONLY(chunk_stats.check_sanity());

  assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words");
  assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words");
  assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words");
}

void SpaceManager::verify_metrics() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  verify_metrics_locked();
}
#endif // ASSERT


} // namespace metaspace