1 /*
   2  * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2018, 2020 SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 //#define LOG_PLEASE
  29 
  30 #include "metaspace/metaspaceTestsCommon.hpp"
  31 #include "metaspace/metaspaceTestContexts.hpp"
  32 #include "metaspace/metaspace_sparsearray.hpp"
  33 #include "utilities/ostream.hpp"
  34 
  35 
  36 // TODO: this class is very similar to MetaspaceArenaTestBed in test_metaspacearena_stress.cpp.
  37 // should be unified.
// Test helper owning a single MetaspaceArena together with the lock and the
// used-words counter the arena needs. Its "with_tests" methods wrap arena
// operations and cross-check the arena's usage numbers and the commit
// limiter of the owning MetaspaceTestContext before and after each call.
class MetaspaceArenaTestHelper {

  MetaspaceTestContext& _helper;         // shared test context (chunk manager + commit limiter)

  Mutex* _lock;                          // handed to the arena; owned and deleted by this helper
  const ArenaGrowthPolicy* _growth_policy;
  SizeAtomicCounter _used_words_counter; // updated by the arena; owned by this helper
  MetaspaceArena* _arena;

public:

  // Creates an arena for the given space type / class-space flag inside the
  // given test context, using the VM's growth policy for that type.
  MetaspaceArenaTestHelper(MetaspaceTestContext& helper, Metaspace::MetaspaceType space_type, bool is_class,
                         const char* name = "gtest-MetaspaceArena")
    : _helper(helper),
      _lock(NULL),
      _growth_policy(NULL),
      _used_words_counter(),
      _arena(NULL)
  {
    _growth_policy = ArenaGrowthPolicy::policy_for_space_type(space_type, is_class);
    _lock = new Mutex(Monitor::native, "gtest-MetaspaceArenaTest-lock", false, Monitor::_safepoint_check_never);
    // Lock during space creation, since this is what happens in the VM too
    //  (see ClassLoaderData::metaspace_non_null(), which we mimic here).
    {
      MutexLocker ml(_lock,  Mutex::_no_safepoint_check_flag);
      _arena = new MetaspaceArena(&_helper.cm(), _growth_policy, _lock, &_used_words_counter, name);
    }
    DEBUG_ONLY(_arena->verify(true));
  }

  ~MetaspaceArenaTestHelper() {
    delete_arena_with_tests();
    delete _lock;
  }

  const CommitLimiter& limiter() const { return _helper.commit_limiter(); }
  MetaspaceArena* arena() const { return _arena; }
  SizeAtomicCounter& used_words_counter() { return _used_words_counter; }

  // Note: all test functions return void due to gtests limitation that we cannot use ASSERT
  // in non-void returning tests.

  // Deletes the arena (if still alive) and checks that afterwards no words
  // count as used anymore and that committed space did not grow (it may
  // shrink if free chunks are uncommitted on release).
  void delete_arena_with_tests() {
    if (_arena != NULL) {
      size_t used_words_before = _used_words_counter.get();
      size_t committed_words_before = limiter().committed_words();
      DEBUG_ONLY(_arena->verify(true));
      delete _arena;
      _arena = NULL;
      size_t used_words_after = _used_words_counter.get();
      size_t committed_words_after = limiter().committed_words();
      // All usage must have been released together with the arena.
      ASSERT_0(used_words_after);
      if (Settings::uncommit_free_chunks()) {
        ASSERT_LE(committed_words_after, committed_words_before);
      } else {
        ASSERT_EQ(committed_words_after, committed_words_before);
      }
    }
  }

  // Retrieves the arena's usage numbers (any output pointer may be NULL) and
  // checks the invariant used <= committed <= capacity where both sides of a
  // comparison were requested.
  void usage_numbers_with_test(size_t* p_used, size_t* p_committed, size_t* p_capacity) const {
    _arena->usage_numbers(p_used, p_committed, p_capacity);
    if (p_used != NULL) {
      if (p_committed != NULL) {
        ASSERT_GE(*p_committed, *p_used);
      }
      // Since we own the used words counter, it should reflect our usage number 1:1
      ASSERT_EQ(_used_words_counter.get(), *p_used);
    }
    if (p_committed != NULL && p_capacity != NULL) {
      ASSERT_GE(*p_capacity, *p_committed);
    }
  }

  // Allocate; caller expects success; return pointer in *p_return_value
  void allocate_from_arena_with_tests_expect_success(MetaWord** p_return_value, size_t word_size) {
    allocate_from_arena_with_tests(p_return_value, word_size);
    ASSERT_NOT_NULL(*p_return_value);
  }

  // Allocate; caller expects success but is not interested in return value
  void allocate_from_arena_with_tests_expect_success(size_t word_size) {
    MetaWord* dummy = NULL;
    allocate_from_arena_with_tests_expect_success(&dummy, word_size);
  }

  // Allocate; caller expects failure
  void allocate_from_arena_with_tests_expect_failure(size_t word_size) {
    MetaWord* dummy = NULL;
    allocate_from_arena_with_tests(&dummy, word_size);
    ASSERT_NULL(dummy);
  }

  // Allocate; it may or may not work; return value in *p_return_value
  void allocate_from_arena_with_tests(MetaWord** p_return_value, size_t word_size) {

    // Note: usage_numbers walks all chunks in use and counts.
    size_t used = 0, committed = 0, capacity = 0;
    usage_numbers_with_test(&used, &committed, &capacity);

    size_t possible_expansion = limiter().possible_expansion_words();

    MetaWord* p = _arena->allocate(word_size);

    SOMETIMES(DEBUG_ONLY(_arena->verify(true);))

    size_t used2 = 0, committed2 = 0, capacity2 = 0;
    usage_numbers_with_test(&used2, &committed2, &capacity2);

    if (p == NULL) {
      // Allocation failed. The only accepted reason is that the commit
      // limiter had too little headroom left; usage numbers must be unchanged.
      if (Settings::new_chunks_are_fully_committed()) {
        ASSERT_LT(possible_expansion, MAX_CHUNK_WORD_SIZE);
      } else {
        ASSERT_LT(possible_expansion, word_size);
      }

      ASSERT_EQ(used, used2);
      ASSERT_EQ(committed, committed2);
      ASSERT_EQ(capacity, capacity2);
    } else {
      // Allocation succeeded. Should be correctly aligned.
      ASSERT_TRUE(is_aligned(p, sizeof(MetaWord)));
      // used: may go up or may not (since our request may have been satisfied from the freeblocklist
      //   whose content already counts as used).
      // committed: may go up, may not
      // capacity: ditto
      ASSERT_GE(used2, used);
      ASSERT_GE(committed2, committed);
      ASSERT_GE(capacity2, capacity);
    }

    *p_return_value = p;
  }

  // Allocate; it may or may not work; but caller does not care for the result value
  void allocate_from_arena_with_tests(size_t word_size) {
    MetaWord* dummy = NULL;
    allocate_from_arena_with_tests(&dummy, word_size);
  }

  // Deallocates a block (it goes to the arena's free block list) and checks
  // that the usage numbers stay unchanged, since such blocks still count
  // as used.
  void deallocate_with_tests(MetaWord* p, size_t word_size) {
    size_t used = 0, committed = 0, capacity = 0;
    usage_numbers_with_test(&used, &committed, &capacity);

    _arena->deallocate(p, word_size);

    SOMETIMES(DEBUG_ONLY(_arena->verify(true);))

    size_t used2 = 0, committed2 = 0, capacity2 = 0;
    usage_numbers_with_test(&used2, &committed2, &capacity2);

    // Nothing should have changed. Deallocated blocks are added to the free block list
    // which still counts as used.
    ASSERT_EQ(used2, used);
    ASSERT_EQ(committed2, committed);
    ASSERT_EQ(capacity2, capacity);
  }


};
 200 
 201 
 202 static void test_basics(size_t commit_limit, bool is_micro) {
 203   MetaspaceTestContext msthelper(commit_limit);
 204   MetaspaceArenaTestHelper helper(msthelper, is_micro ? Metaspace::ReflectionMetaspaceType : Metaspace::StandardMetaspaceType, false);
 205 
 206   helper.allocate_from_arena_with_tests(1);
 207   helper.allocate_from_arena_with_tests(128);
 208   helper.allocate_from_arena_with_tests(128 * K);
 209   helper.allocate_from_arena_with_tests(1);
 210   helper.allocate_from_arena_with_tests(128);
 211   helper.allocate_from_arena_with_tests(128 * K);
 212 }
 213 
// Micro (reflection-type) arena, no commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_micro_nolimit) {
  test_basics(max_uintx, true);
}
 217 
// Micro (reflection-type) arena, 256K-word commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_micro_limit) {
  test_basics(256 * K, true);
}
 221 
// Standard-type arena, no commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_standard_nolimit) {
  test_basics(max_uintx, false);
}
 225 
// Standard-type arena, 256K-word commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_standard_limit) {
  test_basics(256 * K, false);
}
 229 
 230 
 231 // Test: in a single undisturbed MetaspaceArena (so, we should have chunks enlarged in place)
 232 // we allocate a small amount, then the full amount possible. The sum of first and second
 233 // allocation bring us above root chunk size. This should work - chunk enlargement should
 234 // fail and a new root chunk should be allocated instead.
 235 TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place) {
 236 
 237   if (Settings::use_allocation_guard()) {
 238     return;
 239   }
 240 
 241   MetaspaceTestContext msthelper;
 242   MetaspaceArenaTestHelper helper(msthelper, Metaspace::StandardMetaspaceType, false);
 243   helper.allocate_from_arena_with_tests_expect_success(1);
 244   helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE);
 245   helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE / 2);
 246   helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE);
 247 }
 248 
 249 // Test allocating from smallest to largest chunk size, and one step beyond.
  250 // The first n allocations should happen in place, the latter should open a new chunk.
 251 TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_ladder_1) {
 252 
 253   if (Settings::use_allocation_guard()) {
 254     return;
 255   }
 256 
 257   MetaspaceTestContext msthelper;
 258   MetaspaceArenaTestHelper helper(msthelper, Metaspace::StandardMetaspaceType, false);
 259   size_t size = MIN_CHUNK_WORD_SIZE;
 260   while (size <= MAX_CHUNK_WORD_SIZE) {
 261     helper.allocate_from_arena_with_tests_expect_success(size);
 262     size *= 2;
 263   }
 264   helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE);
 265 }
 266 
 267 // Same as MetaspaceArena_test_enlarge_in_place_ladder_1, but increase in *4 step size;
 268 // this way chunk-in-place-enlargement does not work and we should have new chunks at each allocation.
 269 TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_ladder_2) {
 270 
 271   if (Settings::use_allocation_guard()) {
 272     return;
 273   }
 274 
 275   MetaspaceTestContext msthelper;
 276   MetaspaceArenaTestHelper helper(msthelper, Metaspace::StandardMetaspaceType, false);
 277   size_t size = MIN_CHUNK_WORD_SIZE;
 278   while (size <= MAX_CHUNK_WORD_SIZE) {
 279     helper.allocate_from_arena_with_tests_expect_success(size);
 280     size *= 4;
 281   }
 282   helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE);
 283 }
 284 
 285 // Test the MetaspaceArenas' free block list:
 286 // Allocate, deallocate, then allocate the same block again. The second allocate should
 287 // reuse the deallocated block.
 288 TEST_VM(metaspace, MetaspaceArena_deallocate) {
 289   if (Settings::use_allocation_guard()) {
 290     return;
 291   }
 292   for (size_t s = 2; s <= MAX_CHUNK_WORD_SIZE; s *= 2) {
 293     MetaspaceTestContext msthelper;
 294     MetaspaceArenaTestHelper helper(msthelper, Metaspace::StandardMetaspaceType, false);
 295 
 296     MetaWord* p1 = NULL;
 297     helper.allocate_from_arena_with_tests_expect_success(&p1, s);
 298 
 299     size_t used1 = 0, capacity1 = 0;
 300     helper.usage_numbers_with_test(&used1, NULL, &capacity1);
 301     ASSERT_EQ(used1, s);
 302 
 303     helper.deallocate_with_tests(p1, s);
 304 
 305     size_t used2 = 0, capacity2 = 0;
 306     helper.usage_numbers_with_test(&used2, NULL, &capacity2);
 307     ASSERT_EQ(used1, used2);
 308     ASSERT_EQ(capacity2, capacity2);
 309 
 310     MetaWord* p2 = NULL;
 311     helper.allocate_from_arena_with_tests_expect_success(&p2, s);
 312 
 313     size_t used3 = 0, capacity3 = 0;
 314     helper.usage_numbers_with_test(&used3, NULL, &capacity3);
 315     ASSERT_EQ(used3, used2);
 316     ASSERT_EQ(capacity3, capacity2);
 317 
 318     // Actually, we should get the very same allocation back
 319     ASSERT_EQ(p1, p2);
 320   }
 321 }
 322 
static void test_recover_from_commit_limit_hit() {

  if (Settings::new_chunks_are_fully_committed()) {
    return; // This would throw off the commit counting in this test.
  }

  // Test:
  // - Multiple MetaspaceArena allocate (operating under the same commit limiter).
  // - One, while attempting to commit parts of its current chunk on demand,
  //   triggers the limit and cannot commit its chunk further.
  // - We release the other MetaspaceArena - its content is put back to the
  //   freelists.
  // - We re-attempt allocation from the first manager. It should now succeed.
  //
  // This means if the first MetaspaceArena may have to let go of its current chunk and
  // retire it and take a fresh chunk from the freelist.

  const size_t commit_limit = Settings::commit_granule_words() * 10;
  MetaspaceTestContext msthelper(commit_limit);

  // The first MetaspaceArena mimics a micro loader. This will fill the free
  //  chunk list with very small chunks. We allocate from them in an interleaved
  //  way to cause fragmentation.
  MetaspaceArenaTestHelper helper1(msthelper, Metaspace::ReflectionMetaspaceType, false);
  MetaspaceArenaTestHelper helper2(msthelper, Metaspace::ReflectionMetaspaceType, false);

  // This MetaspaceArena should hit the limit. We use BootMetaspaceType here since
  // it gets a large initial chunk which is committed
  // on demand and we are likely to hit a commit limit while trying to expand it.
  MetaspaceArenaTestHelper helper3(msthelper, Metaspace::BootMetaspaceType, false);

  // Allocate space until we have below two but above one granule left
  size_t allocated_from_1_and_2 = 0;
  while (msthelper.commit_limiter().possible_expansion_words() >= Settings::commit_granule_words() * 2 &&
      allocated_from_1_and_2 < commit_limit) {
    helper1.allocate_from_arena_with_tests_expect_success(1);
    helper2.allocate_from_arena_with_tests_expect_success(1);
    allocated_from_1_and_2 += 2;
  }

  // Now, allocating from helper3, creep up on the limit.
  // (The comma expression allocates one word, then tests the returned pointer;
  //  the loop ends at the first failed allocation or after two granules' worth
  //  of single-word allocations, whichever comes first.)
  size_t allocated_from_3 = 0;
  MetaWord* p = NULL;
  while ( (helper3.allocate_from_arena_with_tests(&p, 1), p != NULL) &&
         ++allocated_from_3 < Settings::commit_granule_words() * 2);

  EXPECT_LE(allocated_from_3, Settings::commit_granule_words() * 2);

  // We expect the freelist to be empty of committed space...
  EXPECT_0(msthelper.cm().total_committed_word_size());

  //msthelper.cm().print_on(tty);

  // Release the first MetaspaceArena.
  helper1.delete_arena_with_tests();

  //msthelper.cm().print_on(tty);

  // Its chunks went back to the freelist, which should therefore now
  // contain committed space again.
  EXPECT_GT(msthelper.cm().total_committed_word_size(), (size_t)0);

  // Repeat allocation from helper3, should now work.
  helper3.allocate_from_arena_with_tests_expect_success(1);

}
 389 
 390 
// See test_recover_from_commit_limit_hit() above for the scenario description.
TEST_VM(metaspace, MetaspaceArena_recover_from_limit_hit) {
  test_recover_from_commit_limit_hit();
}
 394 
 395 static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class,
 396                                    size_t expected_starting_capacity,
 397                                    bool test_in_place_enlargement)
 398 {
 399 
 400   if (Settings::use_allocation_guard()) {
 401     return;
 402   }
 403 
 404   // From a MetaspaceArena in a clean room allocate tiny amounts;
 405   // watch it grow. Used/committed/capacity should not grow in
 406   // large jumps. Also, different types of MetaspaceArena should
 407   // have different initial capacities.
 408 
 409   MetaspaceTestContext msthelper;
 410   MetaspaceArenaTestHelper smhelper(msthelper, type, is_class, "Grower");
 411 
 412   MetaspaceArenaTestHelper smhelper_harrasser(msthelper, Metaspace::ReflectionMetaspaceType, true, "Harasser");
 413 
 414   size_t used = 0, committed = 0, capacity = 0;
 415   const size_t alloc_words = 16;
 416 
 417   smhelper.arena()->usage_numbers(&used, &committed, &capacity);
 418   ASSERT_0(used);
 419   ASSERT_0(committed);
 420   ASSERT_0(capacity);
 421 
 422   ///// First allocation //
 423 
 424   smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);
 425 
 426   smhelper.arena()->usage_numbers(&used, &committed, &capacity);
 427 
 428   ASSERT_EQ(used, alloc_words);
 429   ASSERT_GE(committed, used);
 430   ASSERT_GE(capacity, committed);
 431 
 432   ASSERT_EQ(capacity, expected_starting_capacity);
 433 
 434   if (!(Settings::new_chunks_are_fully_committed() && type == Metaspace::BootMetaspaceType)) {
 435     // Initial commit charge for the whole context should be one granule
 436     ASSERT_EQ(msthelper.committed_words(), Settings::commit_granule_words());
 437     // Initial commit number for the arena should be less since - apart from boot loader - no
 438     //  space type has large initial chunks.
 439     ASSERT_LE(committed, Settings::commit_granule_words());
 440   }
 441 
 442   ///// subsequent allocations //
 443 
 444   DEBUG_ONLY(const uintx num_chunk_enlarged = metaspace::InternalStats::num_chunks_enlarged();)
 445 
 446   size_t words_allocated = 0;
 447   int num_allocated = 0;
 448   const size_t safety = MAX_CHUNK_WORD_SIZE * 1.2;
 449   size_t highest_capacity_jump = capacity;
 450   int num_capacity_jumps = 0;
 451 
 452   while (words_allocated < safety && num_capacity_jumps < 15) {
 453 
 454     // if we want to test growth with in-place chunk enlargement, leave MetaspaceArena
 455     // undisturbed; it will have all the place to grow. Otherwise allocate from a little
 456     // side arena to increase fragmentation.
 457     // (Note that this does not completely prevent in-place chunk enlargement but makes it
 458     //  rather improbable)
 459     if (!test_in_place_enlargement) {
 460       smhelper_harrasser.allocate_from_arena_with_tests_expect_success(alloc_words * 2);
 461     }
 462 
 463     smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);
 464     words_allocated += alloc_words;
 465     num_allocated ++;
 466 
 467     size_t used2 = 0, committed2 = 0, capacity2 = 0;
 468 
 469     smhelper.arena()->usage_numbers(&used2, &committed2, &capacity2);
 470 
 471     // used should not grow larger than what we allocated, plus possible overhead.
 472     ASSERT_GE(used2, used);
 473     ASSERT_LE(used2, used + alloc_words * 2);
 474     ASSERT_LE(used2, words_allocated + 100);
 475     used = used2;
 476 
 477     // A jump in committed words should not be larger than commit granule size.
 478     // It can be smaller, since the current chunk of the MetaspaceArena may be
 479     // smaller than a commit granule.
 480     // (Note: unless root chunks are born fully committed)
 481     ASSERT_GE(committed2, used2);
 482     ASSERT_GE(committed2, committed);
 483     const size_t committed_jump = committed2 - committed;
 484     if (committed_jump > 0 && !Settings::new_chunks_are_fully_committed()) {
 485       ASSERT_LE(committed_jump, Settings::commit_granule_words());
 486     }
 487     committed = committed2;
 488 
 489     // Capacity jumps: Test that arenas capacity does not grow too fast.
 490     ASSERT_GE(capacity2, committed2);
 491     ASSERT_GE(capacity2, capacity);
 492     const size_t capacity_jump = capacity2 - capacity;
 493     if (capacity_jump > 0) {
 494       LOG(">" SIZE_FORMAT "->" SIZE_FORMAT "(+" SIZE_FORMAT ")", capacity, capacity2, capacity_jump)
 495       if (capacity_jump > highest_capacity_jump) {
 496         /* Disabled for now since this is rather shaky. The way it is tested makes it too dependent
 497          * on allocation history. Need to rethink this.
 498         ASSERT_LE(capacity_jump, highest_capacity_jump * 2);
 499         ASSERT_GE(capacity_jump, MIN_CHUNK_WORD_SIZE);
 500         ASSERT_LE(capacity_jump, MAX_CHUNK_WORD_SIZE);
 501         */
 502         highest_capacity_jump = capacity_jump;
 503       }
 504       num_capacity_jumps ++;
 505     }
 506 
 507     capacity = capacity2;
 508 
 509   }
 510 
 511   // After all this work, we should see an increase in number of chunk-in-place-enlargements
 512   //  (this especially is vulnerable to regression: the decisions of when to do in-place-enlargements are somewhat
 513   //   complicated, see MetaspaceArena::attempt_enlarge_current_chunk())
 514 #ifdef ASSERT
 515   if (test_in_place_enlargement) {
 516     const uintx num_chunk_enlarged_2 = metaspace::InternalStats::num_chunks_enlarged();
 517     ASSERT_GT(num_chunk_enlarged_2, num_chunk_enlarged);
 518   }
 519 #endif
 520 }
 521 
 522 // these numbers have to be in sync with arena policy numbers (see memory/metaspace/arenaGrowthPolicy.cpp)
// Reflection type, class space; expects 1K-word starting capacity; in-place growth.
TEST_VM(metaspace, MetaspaceArena_growth_refl_c_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}
 527 
// Reflection type, class space; expects 1K-word starting capacity; fragmented growth.
TEST_VM(metaspace, MetaspaceArena_growth_refl_c_not_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}
 532 
// ClassMirrorHolder type, class space; expects 1K-word starting capacity; in-place growth.
TEST_VM(metaspace, MetaspaceArena_growth_anon_c_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}
 537 
// ClassMirrorHolder type, class space; expects 1K-word starting capacity; fragmented growth.
TEST_VM(metaspace, MetaspaceArena_growth_anon_c_not_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}
 542 
// Standard type, class space; expects 2K-word starting capacity; in-place growth.
TEST_VM(metaspace, MetaspaceArena_growth_standard_c_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_2K), true);
}
 547 
// Standard type, class space; expects 2K-word starting capacity; fragmented growth.
TEST_VM(metaspace, MetaspaceArena_growth_standard_c_not_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_2K), false);
}
 552 
 553 /* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
  554  * and too large, to make any reliable guess as to whether chunks get enlarged in place.
 555 TEST_VM(metaspace, MetaspaceArena_growth_boot_c_inplace) {
 556   test_controlled_growth(Metaspace::BootMetaspaceType, true,
 557                          word_size_for_level(CHUNK_LEVEL_1M), true);
 558 }
 559 
 560 TEST_VM(metaspace, MetaspaceArena_growth_boot_c_not_inplace) {
 561   test_controlled_growth(Metaspace::BootMetaspaceType, true,
 562                          word_size_for_level(CHUNK_LEVEL_1M), false);
 563 }
 564 */
 565 
// Reflection type, non-class space; expects 2K-word starting capacity; in-place growth.
TEST_VM(metaspace, MetaspaceArena_growth_refl_nc_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_2K), true);
}
 570 
// Reflection type, non-class space; expects 2K-word starting capacity; fragmented growth.
TEST_VM(metaspace, MetaspaceArena_growth_refl_nc_not_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_2K), false);
}
 575 
// ClassMirrorHolder type, non-class space; expects 1K-word starting capacity; in-place growth.
TEST_VM(metaspace, MetaspaceArena_growth_anon_nc_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}
 580 
// ClassMirrorHolder type, non-class space; expects 1K-word starting capacity; fragmented growth.
TEST_VM(metaspace, MetaspaceArena_growth_anon_nc_not_inplace) {
  test_controlled_growth(Metaspace::ClassMirrorHolderMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_1K), false);
}
 585 
// Standard type, non-class space; expects 4K-word starting capacity; in-place growth.
TEST_VM(metaspace, MetaspaceArena_growth_standard_nc_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4K), true);
}
 590 
// Standard type, non-class space; expects 4K-word starting capacity; fragmented growth.
TEST_VM(metaspace, MetaspaceArena_growth_standard_nc_not_inplace) {
  test_controlled_growth(Metaspace::StandardMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4K), false);
}
 595 
 596 /* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
  597  * and too large, to make any reliable guess as to whether chunks get enlarged in place.
 598 TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_inplace) {
 599   test_controlled_growth(Metaspace::BootMetaspaceType, false,
 600                          word_size_for_level(CHUNK_LEVEL_4M), true);
 601 }
 602 
 603 TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_not_inplace) {
 604   test_controlled_growth(Metaspace::BootMetaspaceType, false,
 605                          word_size_for_level(CHUNK_LEVEL_4M), false);
 606 }
 607 */