1 /*
2 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2018, 2020 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 //#define LOG_PLEASE
29
30 #include "metaspace/metaspaceTestsCommon.hpp"
31 #include "metaspace/metaspaceTestContexts.hpp"
32 #include "metaspace/metaspace_sparsearray.hpp"
33 #include "utilities/ostream.hpp"
34
35
36 // TODO: this class is very similar to MetaspaceArenaTestBed in test_metaspacearena_stress.cpp.
37 // should be unified.
38 class MetaspaceArenaTestHelper {
39
40 MetaspaceTestContext& _helper;
41
42 Mutex* _lock;
43 const ArenaGrowthPolicy* _growth_policy;
44 SizeAtomicCounter _used_words_counter;
45 MetaspaceArena* _arena;
46
47 public:
48
  // Creates a test helper wrapping a single MetaspaceArena.
  // The growth policy is derived from the given space_type|is_class tuple,
  // mirroring what the VM does for real class loaders.
  MetaspaceArenaTestHelper(MetaspaceTestContext& helper, Metaspace::MetaspaceType space_type, bool is_class,
                           const char* name = "gtest-MetaspaceArena")
    : _helper(helper),
    _lock(NULL),
    _growth_policy(NULL),
    _used_words_counter(),
    _arena(NULL)
  {
    _growth_policy = ArenaGrowthPolicy::policy_for_space_type(space_type, is_class);
    _lock = new Mutex(Monitor::native, "gtest-MetaspaceArenaTest-lock", false, Monitor::_safepoint_check_never);
    // Lock during space creation, since this is what happens in the VM too
    // (see ClassLoaderData::metaspace_non_null(), which we mimic here).
    {
      MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag);
      _arena = new MetaspaceArena(&_helper.cm(), _growth_policy, _lock, &_used_words_counter, name);
    }
    DEBUG_ONLY(_arena->verify(true));
  }
67
  // Deletes the arena (running the accompanying counter checks), then the lock.
  ~MetaspaceArenaTestHelper() {
    delete_arena_with_tests();
    delete _lock;
  }
72
  // Commit limiter shared by all arenas in this test context.
  const CommitLimiter& limiter() const { return _helper.commit_limiter(); }
  // The arena under test (owned by this helper).
  MetaspaceArena* arena() const { return _arena; }
  // Used-words counter owned by this helper; reflects only this arena's usage.
  SizeAtomicCounter& used_words_counter() { return _used_words_counter; }
76
77 // Note: all test functions return void due to gtests limitation that we cannot use ASSERT
78 // in non-void returning tests.
79
  // Deletes the arena and checks the bookkeeping:
  // - the used words counter must drop to zero (all memory returned);
  // - committed words must not rise; they may fall if free chunks get
  //   uncommitted, otherwise they must stay exactly the same.
  void delete_arena_with_tests() {
    if (_arena != NULL) {
      size_t used_words_before = _used_words_counter.get();
      size_t committed_words_before = limiter().committed_words();
      DEBUG_ONLY(_arena->verify(true));
      delete _arena;
      _arena = NULL;
      size_t used_words_after = _used_words_counter.get();
      size_t committed_words_after = limiter().committed_words();
      ASSERT_0(used_words_after);
      if (Settings::uncommit_free_chunks()) {
        ASSERT_LE(committed_words_after, committed_words_before);
      } else {
        ASSERT_EQ(committed_words_after, committed_words_before);
      }
    }
  }
97
98 void usage_numbers_with_test(size_t* p_used, size_t* p_committed, size_t* p_capacity) const {
99 _arena->usage_numbers(p_used, p_committed, p_capacity);
100 if (p_used != NULL) {
101 if (p_committed != NULL) {
102 ASSERT_GE(*p_committed, *p_used);
103 }
104 // Since we own the used words counter, it should reflect our usage number 1:1
122 }
123
124 // Allocate; caller expects failure
125 void allocate_from_arena_with_tests_expect_failure(size_t word_size) {
126 MetaWord* dummy = NULL;
127 allocate_from_arena_with_tests(&dummy, word_size);
128 ASSERT_NULL(dummy);
129 }
130
  // Allocate; it may or may not work; return value in *p_return_value.
  // Checks invariants around the allocation: on failure, the commit limit must
  // actually have been the obstacle and counters must be unchanged; on success,
  // the pointer is aligned and counters may only grow monotonically.
  void allocate_from_arena_with_tests(MetaWord** p_return_value, size_t word_size) {

    // Note: usage_numbers walks all chunks in use and counts.
    size_t used = 0, committed = 0, capacity = 0;
    usage_numbers_with_test(&used, &committed, &capacity);

    // Headroom left under the commit limiter before this allocation.
    size_t possible_expansion = limiter().possible_expansion_words();

    MetaWord* p = _arena->allocate(word_size);

    SOMETIMES(DEBUG_ONLY(_arena->verify(true);))

    size_t used2 = 0, committed2 = 0, capacity2 = 0;
    usage_numbers_with_test(&used2, &committed2, &capacity2);

    if (p == NULL) {
      // Allocation failed.
      // The only acceptable reason is that the remaining commit headroom was
      // too small (a full new chunk if chunks are born fully committed).
      if (Settings::new_chunks_are_fully_committed()) {
        ASSERT_LT(possible_expansion, MAX_CHUNK_WORD_SIZE);
      } else {
        ASSERT_LT(possible_expansion, word_size);
      }

      // A failed allocation must leave all counters untouched.
      ASSERT_EQ(used, used2);
      ASSERT_EQ(committed, committed2);
      ASSERT_EQ(capacity, capacity2);
    } else {
      // Allocation succeeded. Should be correctly aligned.
      ASSERT_TRUE(is_aligned(p, sizeof(MetaWord)));
      // used: may go up or may not (since our request may have been satisfied from the freeblocklist
      // whose content already counts as used).
      // committed: may go up, may not
      // capacity: ditto
      ASSERT_GE(used2, used);
      ASSERT_GE(committed2, committed);
      ASSERT_GE(capacity2, capacity);
    }

    *p_return_value = p;
  }
172
173 // Allocate; it may or may not work; but caller does not care for the result value
174 void allocate_from_arena_with_tests(size_t word_size) {
175 MetaWord* dummy = NULL;
176 allocate_from_arena_with_tests(&dummy, word_size);
177 }
178
179
180 void deallocate_with_tests(MetaWord* p, size_t word_size) {
181 size_t used = 0, committed = 0, capacity = 0;
182 usage_numbers_with_test(&used, &committed, &capacity);
183
184 _arena->deallocate(p, word_size);
185
186 SOMETIMES(DEBUG_ONLY(_arena->verify(true);))
187
188 size_t used2 = 0, committed2 = 0, capacity2 = 0;
189 usage_numbers_with_test(&used2, &committed2, &capacity2);
190
191 // Nothing should have changed. Deallocated blocks are added to the free block list
192 // which still counts as used.
193 ASSERT_EQ(used2, used);
194 ASSERT_EQ(committed2, committed);
195 ASSERT_EQ(capacity2, capacity);
196 }
197
198
199 };
200
201
202 static void test_basics(size_t commit_limit, bool is_micro) {
203 MetaspaceTestContext msthelper(commit_limit);
204 MetaspaceArenaTestHelper helper(msthelper, is_micro ? Metaspace::ReflectionMetaspaceType : Metaspace::StandardMetaspaceType, false);
205
206 helper.allocate_from_arena_with_tests(1);
207 helper.allocate_from_arena_with_tests(128);
208 helper.allocate_from_arena_with_tests(128 * K);
209 helper.allocate_from_arena_with_tests(1);
210 helper.allocate_from_arena_with_tests(128);
211 helper.allocate_from_arena_with_tests(128 * K);
212 }
213
// Micro (reflection-type) arena, no commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_micro_nolimit) {
  test_basics(max_uintx, true);
}
217
// Micro (reflection-type) arena, 256K commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_micro_limit) {
  test_basics(256 * K, true);
}
221
// Standard-type arena, no commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_standard_nolimit) {
  test_basics(max_uintx, false);
}
225
// Standard-type arena, 256K commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_standard_limit) {
  test_basics(256 * K, false);
}
229
230
231 // Test: in a single undisturbed MetaspaceArena (so, we should have chunks enlarged in place)
232 // we allocate a small amount, then the full amount possible. The sum of first and second
233 // allocation bring us above root chunk size. This should work - chunk enlargement should
234 // fail and a new root chunk should be allocated instead.
235 TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place) {
236
237 if (Settings::use_allocation_guard()) {
238 return;
239 }
240
241 MetaspaceTestContext msthelper;
242 MetaspaceArenaTestHelper helper(msthelper, Metaspace::StandardMetaspaceType, false);
243 helper.allocate_from_arena_with_tests_expect_success(1);
244 helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE);
245 helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE / 2);
246 helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE);
247 }
248
// Test allocating from smallest to largest chunk size, and one step beyond.
// The first n allocations should happen in place, the last one should open a new chunk.
251 TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_ladder_1) {
252
253 if (Settings::use_allocation_guard()) {
254 return;
255 }
256
257 MetaspaceTestContext msthelper;
258 MetaspaceArenaTestHelper helper(msthelper, Metaspace::StandardMetaspaceType, false);
259 size_t size = MIN_CHUNK_WORD_SIZE;
260 while (size <= MAX_CHUNK_WORD_SIZE) {
261 helper.allocate_from_arena_with_tests_expect_success(size);
262 size *= 2;
263 }
264 helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE);
265 }
266
267 // Same as MetaspaceArena_test_enlarge_in_place_ladder_1, but increase in *4 step size;
268 // this way chunk-in-place-enlargement does not work and we should have new chunks at each allocation.
269 TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_ladder_2) {
270
271 if (Settings::use_allocation_guard()) {
272 return;
273 }
274
275 MetaspaceTestContext msthelper;
276 MetaspaceArenaTestHelper helper(msthelper, Metaspace::StandardMetaspaceType, false);
277 size_t size = MIN_CHUNK_WORD_SIZE;
278 while (size <= MAX_CHUNK_WORD_SIZE) {
279 helper.allocate_from_arena_with_tests_expect_success(size);
280 size *= 4;
281 }
282 helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE);
283 }
284
285 // Test the MetaspaceArenas' free block list:
286 // Allocate, deallocate, then allocate the same block again. The second allocate should
287 // reuse the deallocated block.
288 TEST_VM(metaspace, MetaspaceArena_deallocate) {
289 if (Settings::use_allocation_guard()) {
290 return;
291 }
292 for (size_t s = 2; s <= MAX_CHUNK_WORD_SIZE; s *= 2) {
293 MetaspaceTestContext msthelper;
294 MetaspaceArenaTestHelper helper(msthelper, Metaspace::StandardMetaspaceType, false);
295
296 MetaWord* p1 = NULL;
297 helper.allocate_from_arena_with_tests_expect_success(&p1, s);
298
299 size_t used1 = 0, capacity1 = 0;
300 helper.usage_numbers_with_test(&used1, NULL, &capacity1);
301 ASSERT_EQ(used1, s);
302
303 helper.deallocate_with_tests(p1, s);
304
305 size_t used2 = 0, capacity2 = 0;
306 helper.usage_numbers_with_test(&used2, NULL, &capacity2);
307 ASSERT_EQ(used1, used2);
308 ASSERT_EQ(capacity2, capacity2);
309
310 MetaWord* p2 = NULL;
311 helper.allocate_from_arena_with_tests_expect_success(&p2, s);
312
313 size_t used3 = 0, capacity3 = 0;
314 helper.usage_numbers_with_test(&used3, NULL, &capacity3);
321 }
322
static void test_recover_from_commit_limit_hit() {

  if (Settings::new_chunks_are_fully_committed()) {
    return; // This would throw off the commit counting in this test.
  }

  // Test:
  // - Multiple MetaspaceArena allocate (operating under the same commit limiter).
  // - One, while attempting to commit parts of its current chunk on demand,
  //   triggers the limit and cannot commit its chunk further.
  // - We release the other MetaspaceArena - its content is put back to the
  //   freelists.
  // - We re-attempt allocation from the first manager. It should now succeed.
  //
  // This means if the first MetaspaceArena may have to let go of its current chunk and
  // retire it and take a fresh chunk from the freelist.

  const size_t commit_limit = Settings::commit_granule_words() * 10;
  MetaspaceTestContext msthelper(commit_limit);

  // The first MetaspaceArena mimics a micro loader. This will fill the free
  // chunk list with very small chunks. We allocate from them in an interleaved
  // way to cause fragmentation.
  MetaspaceArenaTestHelper helper1(msthelper, Metaspace::ReflectionMetaspaceType, false);
  MetaspaceArenaTestHelper helper2(msthelper, Metaspace::ReflectionMetaspaceType, false);

  // This MetaspaceArena should hit the limit. We use BootMetaspaceType here since
  // it gets a large initial chunk which is committed
  // on demand and we are likely to hit a commit limit while trying to expand it.
  MetaspaceArenaTestHelper helper3(msthelper, Metaspace::BootMetaspaceType, false);

  // Allocate space until we have below two but above one granule left
  size_t allocated_from_1_and_2 = 0;
  while (msthelper.commit_limiter().possible_expansion_words() >= Settings::commit_granule_words() * 2 &&
      allocated_from_1_and_2 < commit_limit) {
    helper1.allocate_from_arena_with_tests_expect_success(1);
    helper2.allocate_from_arena_with_tests_expect_success(1);
    allocated_from_1_and_2 += 2;
  }

  // Now, allocating from helper3, creep up on the limit
  size_t allocated_from_3 = 0;
  MetaWord* p = NULL;
  // Comma operator: allocate first, then test the returned pointer; stop on
  // the first failed allocation or after two granules' worth of words.
  while ( (helper3.allocate_from_arena_with_tests(&p, 1), p != NULL) &&
         ++allocated_from_3 < Settings::commit_granule_words() * 2);

  EXPECT_LE(allocated_from_3, Settings::commit_granule_words() * 2);

  // We expect the freelist to be empty of committed space...
  EXPECT_0(msthelper.cm().total_committed_word_size());

  //msthelper.cm().print_on(tty);

  // Release the first MetaspaceArena.
  helper1.delete_arena_with_tests();

  //msthelper.cm().print_on(tty);

  // Should have populated the freelist with committed space.
  EXPECT_GT(msthelper.cm().total_committed_word_size(), (size_t)0);

  // Repeat allocation from helper3, should now work.
  helper3.allocate_from_arena_with_tests_expect_success(1);

}
389
390
// See test_recover_from_commit_limit_hit() above for the scenario description.
TEST_VM(metaspace, MetaspaceArena_recover_from_limit_hit) {
  test_recover_from_commit_limit_hit();
}
394
// Allocate tiny amounts from a fresh arena and watch its used/committed/capacity
// numbers grow; assert that growth happens in small controlled steps and that
// the initial capacity matches the expectation for this arena type.
static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class,
                                   size_t expected_starting_capacity,
                                   bool test_in_place_enlargement)
{

  if (Settings::use_allocation_guard()) {
    return;
  }

  // From a MetaspaceArena in a clean room allocate tiny amounts;
  // watch it grow. Used/committed/capacity should not grow in
  // large jumps. Also, different types of MetaspaceArena should
  // have different initial capacities.

  MetaspaceTestContext msthelper;
  MetaspaceArenaTestHelper smhelper(msthelper, type, is_class, "Grower");

  MetaspaceArenaTestHelper smhelper_harrasser(msthelper, Metaspace::ReflectionMetaspaceType, true, "Harasser");

  size_t used = 0, committed = 0, capacity = 0;
  const size_t alloc_words = 16;

  // A virgin arena should have no usage at all.
  smhelper.arena()->usage_numbers(&used, &committed, &capacity);
  ASSERT_0(used);
  ASSERT_0(committed);
  ASSERT_0(capacity);

  ///// First allocation //

  smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);

  smhelper.arena()->usage_numbers(&used, &committed, &capacity);

  ASSERT_EQ(used, alloc_words);
  ASSERT_GE(committed, used);
  ASSERT_GE(capacity, committed);

  ASSERT_EQ(capacity, expected_starting_capacity);

  if (!(Settings::new_chunks_are_fully_committed() && type == Metaspace::BootMetaspaceType)) {
    // Initial commit charge for the whole context should be one granule
    ASSERT_EQ(msthelper.committed_words(), Settings::commit_granule_words());
    // Initial commit number for the arena should be less since - apart from boot loader - no
    // space type has large initial chunks.
    ASSERT_LE(committed, Settings::commit_granule_words());
  }

  ///// subsequent allocations //

  DEBUG_ONLY(const uintx num_chunk_enlarged = metaspace::InternalStats::num_chunks_enlarged();)

  size_t words_allocated = 0;
  int num_allocated = 0;
  // Upper bound on total allocation to guarantee loop termination.
  const size_t safety = MAX_CHUNK_WORD_SIZE * 1.2;
  size_t highest_capacity_jump = capacity;
  int num_capacity_jumps = 0;

  while (words_allocated < safety && num_capacity_jumps < 15) {

    // if we want to test growth with in-place chunk enlargement, leave MetaspaceArena
    // undisturbed; it will have all the place to grow. Otherwise allocate from a little
    // side arena to increase fragmentation.
    // (Note that this does not completely prevent in-place chunk enlargement but makes it
    //  rather improbable)
    if (!test_in_place_enlargement) {
      smhelper_harrasser.allocate_from_arena_with_tests_expect_success(alloc_words * 2);
    }

    smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);
    words_allocated += alloc_words;
    num_allocated ++;

    size_t used2 = 0, committed2 = 0, capacity2 = 0;

    smhelper.arena()->usage_numbers(&used2, &committed2, &capacity2);

    // used should not grow larger than what we allocated, plus possible overhead.
    ASSERT_GE(used2, used);
    ASSERT_LE(used2, used + alloc_words * 2);
    ASSERT_LE(used2, words_allocated + 100);
    used = used2;

    // A jump in committed words should not be larger than commit granule size.
    // It can be smaller, since the current chunk of the MetaspaceArena may be
    // smaller than a commit granule.
    // (Note: unless root chunks are born fully committed)
    ASSERT_GE(committed2, used2);
    ASSERT_GE(committed2, committed);
    const size_t committed_jump = committed2 - committed;
    if (committed_jump > 0 && !Settings::new_chunks_are_fully_committed()) {
      ASSERT_LE(committed_jump, Settings::commit_granule_words());
    }
    committed = committed2;

    // Capacity jumps: Test that arenas capacity does not grow too fast.
    ASSERT_GE(capacity2, committed2);
    ASSERT_GE(capacity2, capacity);
    const size_t capacity_jump = capacity2 - capacity;
    if (capacity_jump > 0) {
      LOG(">" SIZE_FORMAT "->" SIZE_FORMAT "(+" SIZE_FORMAT ")", capacity, capacity2, capacity_jump)
      if (capacity_jump > highest_capacity_jump) {
        /* Disabled for now since this is rather shaky. The way it is tested makes it too dependent
         * on allocation history. Need to rethink this.
        ASSERT_LE(capacity_jump, highest_capacity_jump * 2);
        ASSERT_GE(capacity_jump, MIN_CHUNK_WORD_SIZE);
        ASSERT_LE(capacity_jump, MAX_CHUNK_WORD_SIZE);
        */
        highest_capacity_jump = capacity_jump;
      }
      num_capacity_jumps ++;
    }

    capacity = capacity2;

  }

  // After all this work, we should see an increase in number of chunk-in-place-enlargements
  // (this especially is vulnerable to regression: the decisions of when to do in-place-enlargements are somewhat
  // complicated, see MetaspaceArena::attempt_enlarge_current_chunk())
#ifdef ASSERT
  if (test_in_place_enlargement) {
    const uintx num_chunk_enlarged_2 = metaspace::InternalStats::num_chunks_enlarged();
    ASSERT_GT(num_chunk_enlarged_2, num_chunk_enlarged);
  }
#endif
}
521
522 // these numbers have to be in sync with arena policy numbers (see memory/metaspace/arenaGrowthPolicy.cpp)
523 TEST_VM(metaspace, MetaspaceArena_growth_refl_c_inplace) {
524 test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
|
1 /*
2 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2020 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "memory/metaspace/msArena.hpp"
29 #include "memory/metaspace/msArenaGrowthPolicy.hpp"
30 #include "memory/metaspace/msCommitLimiter.hpp"
31 #include "memory/metaspace/msCounter.hpp"
32 #include "memory/metaspace/msInternalStats.hpp"
33 #include "memory/metaspace/msSettings.hpp"
34 #include "memory/metaspace/msStatistics.hpp"
35 #include "runtime/mutex.hpp"
36 #include "runtime/mutexLocker.hpp"
37 #include "utilities/debug.hpp"
38 #include "utilities/globalDefinitions.hpp"
39
40 //#define LOG_PLEASE
41 #include "metaspaceGtestCommon.hpp"
42 #include "metaspaceGtestContexts.hpp"
43 #include "metaspaceGtestRangeHelpers.hpp"
44
45 using metaspace::ArenaGrowthPolicy;
46 using metaspace::CommitLimiter;
47 using metaspace::InternalStats;
48 using metaspace::MemRangeCounter;
49 using metaspace::MetaspaceArena;
50 using metaspace::SizeAtomicCounter;
51 using metaspace::Settings;
52 using metaspace::ArenaStats;
53
54 // See metaspaceArena.cpp : needed for predicting commit sizes.
55 namespace metaspace {
56 extern size_t get_raw_word_size_for_requested_word_size(size_t net_word_size);
57 }
58
59 class MetaspaceArenaTestHelper {
60
61 MetaspaceGtestContext& _context;
62
63 Mutex* _lock;
64 const ArenaGrowthPolicy* _growth_policy;
65 SizeAtomicCounter _used_words_counter;
66 MetaspaceArena* _arena;
67
  // Shared constructor code: sets the growth policy, creates the lock and the arena.
  void initialize(const ArenaGrowthPolicy* growth_policy, const char* name = "gtest-MetaspaceArena") {
    _growth_policy = growth_policy;
    _lock = new Mutex(Monitor::native, "gtest-MetaspaceArenaTest-lock", false, Monitor::_safepoint_check_never);
    // Lock during space creation, since this is what happens in the VM too
    // (see ClassLoaderData::metaspace_non_null(), which we mimic here).
    {
      MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag);
      _arena = new MetaspaceArena(&_context.cm(), _growth_policy, _lock, &_used_words_counter, name);
    }
    DEBUG_ONLY(_arena->verify());

  }
80
81 public:
82
  // Create a helper; growth policy for arena is determined by the given spacetype|class tuple
  MetaspaceArenaTestHelper(MetaspaceGtestContext& helper,
                           Metaspace::MetaspaceType space_type, bool is_class,
                           const char* name = "gtest-MetaspaceArena")
    :_context(helper)
  {
    initialize(ArenaGrowthPolicy::policy_for_space_type(space_type, is_class), name);
  }
91
  // Create a helper; growth policy is directly specified by the caller
  // (used by tests which need an artificial growth ladder).
  MetaspaceArenaTestHelper(MetaspaceGtestContext& helper, const ArenaGrowthPolicy* growth_policy,
                           const char* name = "gtest-MetaspaceArena")
    :_context(helper)
  {
    initialize(growth_policy, name);
  }
99
  // Deletes the arena (running the accompanying counter checks), then the lock.
  ~MetaspaceArenaTestHelper() {
    delete_arena_with_tests();
    delete _lock;
  }
104
  // Commit limiter shared by all arenas in this test context.
  const CommitLimiter& limiter() const { return _context.commit_limiter(); }
  // The arena under test (owned by this helper).
  MetaspaceArena* arena() const { return _arena; }
  // Used-words counter owned by this helper; reflects only this arena's usage.
  SizeAtomicCounter& used_words_counter() { return _used_words_counter; }
108
109 // Note: all test functions return void due to gtests limitation that we cannot use ASSERT
110 // in non-void returning tests.
111
112 void delete_arena_with_tests() {
113 if (_arena != NULL) {
114 size_t used_words_before = _used_words_counter.get();
115 size_t committed_words_before = limiter().committed_words();
116 DEBUG_ONLY(_arena->verify());
117 delete _arena;
118 _arena = NULL;
119 size_t used_words_after = _used_words_counter.get();
120 size_t committed_words_after = limiter().committed_words();
121 ASSERT_0(used_words_after);
122 if (Settings::uncommit_free_chunks()) {
123 ASSERT_LE(committed_words_after, committed_words_before);
124 } else {
125 ASSERT_EQ(committed_words_after, committed_words_before);
126 }
127 }
128 }
129
130 void usage_numbers_with_test(size_t* p_used, size_t* p_committed, size_t* p_capacity) const {
131 _arena->usage_numbers(p_used, p_committed, p_capacity);
132 if (p_used != NULL) {
133 if (p_committed != NULL) {
134 ASSERT_GE(*p_committed, *p_used);
135 }
136 // Since we own the used words counter, it should reflect our usage number 1:1
154 }
155
156 // Allocate; caller expects failure
157 void allocate_from_arena_with_tests_expect_failure(size_t word_size) {
158 MetaWord* dummy = NULL;
159 allocate_from_arena_with_tests(&dummy, word_size);
160 ASSERT_NULL(dummy);
161 }
162
  // Allocate; it may or may not work; return value in *p_return_value.
  // Checks invariants around the allocation: on failure, the commit limit must
  // actually have been the obstacle and counters must be unchanged; on success,
  // the pointer is aligned and counters may only grow monotonically.
  void allocate_from_arena_with_tests(MetaWord** p_return_value, size_t word_size) {

    // Note: usage_numbers walks all chunks in use and counts.
    size_t used = 0, committed = 0, capacity = 0;
    usage_numbers_with_test(&used, &committed, &capacity);

    // Headroom left under the commit limiter before this allocation.
    size_t possible_expansion = limiter().possible_expansion_words();

    MetaWord* p = _arena->allocate(word_size);

    SOMETIMES(DEBUG_ONLY(_arena->verify();))

    size_t used2 = 0, committed2 = 0, capacity2 = 0;
    usage_numbers_with_test(&used2, &committed2, &capacity2);

    if (p == NULL) {
      // Allocation failed.
      // The only acceptable reason is that the remaining commit headroom was
      // too small (a full new chunk if chunks are born fully committed).
      if (Settings::new_chunks_are_fully_committed()) {
        ASSERT_LT(possible_expansion, MAX_CHUNK_WORD_SIZE);
      } else {
        ASSERT_LT(possible_expansion, word_size);
      }

      // A failed allocation must leave all counters untouched.
      ASSERT_EQ(used, used2);
      ASSERT_EQ(committed, committed2);
      ASSERT_EQ(capacity, capacity2);
    } else {
      // Allocation succeeded. Should be correctly aligned.
      ASSERT_TRUE(is_aligned(p, sizeof(MetaWord)));
      // used: may go up or may not (since our request may have been satisfied from the freeblocklist
      // whose content already counts as used).
      // committed: may go up, may not
      // capacity: ditto
      ASSERT_GE(used2, used);
      ASSERT_GE(committed2, committed);
      ASSERT_GE(capacity2, capacity);
    }

    *p_return_value = p;
  }
204
205 // Allocate; it may or may not work; but caller does not care for the result value
206 void allocate_from_arena_with_tests(size_t word_size) {
207 MetaWord* dummy = NULL;
208 allocate_from_arena_with_tests(&dummy, word_size);
209 }
210
211 void deallocate_with_tests(MetaWord* p, size_t word_size) {
212 size_t used = 0, committed = 0, capacity = 0;
213 usage_numbers_with_test(&used, &committed, &capacity);
214
215 _arena->deallocate(p, word_size);
216
217 SOMETIMES(DEBUG_ONLY(_arena->verify();))
218
219 size_t used2 = 0, committed2 = 0, capacity2 = 0;
220 usage_numbers_with_test(&used2, &committed2, &capacity2);
221
222 // Nothing should have changed. Deallocated blocks are added to the free block list
223 // which still counts as used.
224 ASSERT_EQ(used2, used);
225 ASSERT_EQ(committed2, committed);
226 ASSERT_EQ(capacity2, capacity);
227 }
228
  // Returns a by-value snapshot of the arena's statistics.
  ArenaStats get_arena_statistics() const {
    ArenaStats stats;
    _arena->add_to_statistics(&stats);
    return stats;
  }
234
  // Convenience method to return number of chunks in arena (including current chunk)
  int get_number_of_chunks() const {
    return get_arena_statistics().totals()._num;
  }
239
240 };
241
242 static void test_basics(size_t commit_limit, bool is_micro) {
243 MetaspaceGtestContext context(commit_limit);
244 MetaspaceArenaTestHelper helper(context, is_micro ? Metaspace::ReflectionMetaspaceType : Metaspace::StandardMetaspaceType, false);
245
246 helper.allocate_from_arena_with_tests(1);
247 helper.allocate_from_arena_with_tests(128);
248 helper.allocate_from_arena_with_tests(128 * K);
249 helper.allocate_from_arena_with_tests(1);
250 helper.allocate_from_arena_with_tests(128);
251 helper.allocate_from_arena_with_tests(128 * K);
252 }
253
// Micro (reflection-type) arena, no commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_micro_nolimit) {
  test_basics(max_uintx, true);
}
257
// Micro (reflection-type) arena, 256K commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_micro_limit) {
  test_basics(256 * K, true);
}
261
// Standard-type arena, no commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_standard_nolimit) {
  test_basics(max_uintx, false);
}
265
// Standard-type arena, 256K commit limit.
TEST_VM(metaspace, MetaspaceArena_basics_standard_limit) {
  test_basics(256 * K, false);
}
269
270 // Test chunk enlargement:
271 // A single MetaspaceArena, left undisturbed with place to grow. Slowly fill arena up.
272 // We should see at least some occurrences of chunk-in-place enlargement.
273 static void test_chunk_enlargment_simple(Metaspace::MetaspaceType spacetype, bool is_class) {
274
275 MetaspaceGtestContext context;
276 MetaspaceArenaTestHelper helper(context, (Metaspace::MetaspaceType)spacetype, is_class);
277
278 uint64_t n1 = metaspace::InternalStats::num_chunks_enlarged();
279
280 size_t allocated = 0;
281 while (allocated <= MAX_CHUNK_WORD_SIZE &&
282 metaspace::InternalStats::num_chunks_enlarged() == n1) {
283 size_t s = IntRange(32, 128).random_value();
284 helper.allocate_from_arena_with_tests_expect_success(s);
285 allocated += metaspace::get_raw_word_size_for_requested_word_size(s);
286 }
287
288 EXPECT_GT(metaspace::InternalStats::num_chunks_enlarged(), n1);
289
290 }
291
292 // Do this test for some of the standard types; don't do it for the boot loader type
293 // since that one starts out with max chunk size so we would not see any enlargement.
294
// Standard loader type, class space.
TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_standard_c) {
  test_chunk_enlargment_simple(Metaspace::StandardMetaspaceType, true);
}
298
// Standard loader type, non-class space.
TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_standard_nc) {
  test_chunk_enlargment_simple(Metaspace::StandardMetaspaceType, false);
}
302
// Micro (reflection) loader type, class space.
TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_micro_c) {
  test_chunk_enlargment_simple(Metaspace::ReflectionMetaspaceType, true);
}
306
// Micro (reflection) loader type, non-class space.
TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_micro_nc) {
  test_chunk_enlargment_simple(Metaspace::ReflectionMetaspaceType, false);
}
310
311 // Test chunk enlargement:
312 // A single MetaspaceArena, left undisturbed with place to grow. Slowly fill arena up.
313 // We should see occurrences of chunk-in-place enlargement.
314 // Here, we give it an ideal policy which should enable the initial chunk to grow unmolested
315 // until finish.
TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_2) {

  if (Settings::use_allocation_guard()) {
    return;
  }

  // Note: internally, chunk in-place enlargement is disallowed if growing the chunk
  // would cause the arena to claim more memory than its growth policy allows. This
  // is done to prevent the arena to grow too fast.
  //
  // In order to test in-place growth here without that restriction I give it an
  // artificial growth policy which starts out with a tiny chunk size, then balloons
  // right up to max chunk size. This will cause the initial chunk to be tiny, and
  // then the arena is able to grow it without violating growth policy.
  chunklevel_t growth[] = { HIGHEST_CHUNK_LEVEL, ROOT_CHUNK_LEVEL };
  ArenaGrowthPolicy growth_policy(growth, 2);

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper helper(context, &growth_policy);

  // Number of in-place chunk enlargements observed VM-wide before the test.
  uint64_t n1 = metaspace::InternalStats::num_chunks_enlarged();

  size_t allocated = 0;
  while (allocated <= MAX_CHUNK_WORD_SIZE) {
    size_t s = IntRange(32, 128).random_value();
    helper.allocate_from_arena_with_tests_expect_success(s);
    allocated += metaspace::get_raw_word_size_for_requested_word_size(s);
    if (allocated <= MAX_CHUNK_WORD_SIZE) {
      // Chunk should have been enlarged in place
      ASSERT_EQ(1, helper.get_number_of_chunks());
    } else {
      // Next chunk should have started
      ASSERT_EQ(2, helper.get_number_of_chunks());
    }
  }

  int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
  LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);

  ASSERT_GT0(times_chunk_were_enlarged);

}
358
// Regression test: Given a single MetaspaceArena, left undisturbed with place to grow,
// test that in-place enlargement correctly fails if growing the chunk would bring us
// beyond the maximum size of a chunk.
362 TEST_VM(metaspace, MetaspaceArena_test_failing_to_enlarge_in_place_max_chunk_size) {
363
364 if (Settings::use_allocation_guard()) {
365 return;
366 }
367
368 MetaspaceGtestContext context;
369
370 for (size_t first_allocation_size = 1; first_allocation_size <= MAX_CHUNK_WORD_SIZE / 2; first_allocation_size *= 2) {
371
372 MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
373
374 // we allocate first a small amount, then the full amount possible.
375 // The sum of first and second allocation should bring us above root chunk size.
376 // This should work, we should not see any problems, but no chunk enlargement should
377 // happen.
378 int n1 = metaspace::InternalStats::num_chunks_enlarged();
379
380 helper.allocate_from_arena_with_tests_expect_success(first_allocation_size);
381 EXPECT_EQ(helper.get_number_of_chunks(), 1);
382
383 helper.allocate_from_arena_with_tests_expect_success(MAX_CHUNK_WORD_SIZE - first_allocation_size + 1);
384 EXPECT_EQ(helper.get_number_of_chunks(), 2);
385
386 int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
387 LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);
388
389 EXPECT_0(times_chunk_were_enlarged);
390
391 }
392 }
393
// Regression test: Given a single MetaspaceArena, left undisturbed with place to grow,
// test that in-place enlargement correctly fails if growing the chunk would cause more
// than a doubling of its size.
397 TEST_VM(metaspace, MetaspaceArena_test_failing_to_enlarge_in_place_doubling_chunk_size) {
398
399 if (Settings::use_allocation_guard()) {
400 return;
401 }
402
403 MetaspaceGtestContext context;
404 MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
405
406 int n1 = metaspace::InternalStats::num_chunks_enlarged();
407
408 helper.allocate_from_arena_with_tests_expect_success(1000);
409 EXPECT_EQ(helper.get_number_of_chunks(), 1);
410
411 helper.allocate_from_arena_with_tests_expect_success(4000);
412 EXPECT_EQ(helper.get_number_of_chunks(), 2);
413
414 int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
415 LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);
416
417 EXPECT_0(times_chunk_were_enlarged);
418
419 }
420
421 // Test the MetaspaceArenas' free block list:
422 // Allocate, deallocate, then allocate the same block again. The second allocate should
423 // reuse the deallocated block.
TEST_VM(metaspace, MetaspaceArena_deallocate) {
  if (Settings::use_allocation_guard()) {
    return;
  }
  // Walk allocation sizes in powers of two up to the maximum chunk size.
  for (size_t s = 2; s <= MAX_CHUNK_WORD_SIZE; s *= 2) {
    MetaspaceGtestContext context;
    MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);

    MetaWord* p1 = NULL;
    helper.allocate_from_arena_with_tests_expect_success(&p1, s);

    size_t used1 = 0, capacity1 = 0;
    helper.usage_numbers_with_test(&used1, NULL, &capacity1);
    ASSERT_EQ(used1, s);

    helper.deallocate_with_tests(p1, s);

    // Deallocation puts the block on the arena's free block list; the usage
    // numbers should not shrink.
    size_t used2 = 0, capacity2 = 0;
    helper.usage_numbers_with_test(&used2, NULL, &capacity2);
    ASSERT_EQ(used1, used2);
    // NOTE(review): this compares capacity2 with itself and is trivially true;
    // presumably ASSERT_EQ(capacity1, capacity2) was intended — confirm and fix.
    ASSERT_EQ(capacity2, capacity2);

    // The second allocation of the same size should be served from the free
    // block list rather than growing the arena.
    MetaWord* p2 = NULL;
    helper.allocate_from_arena_with_tests_expect_success(&p2, s);

    size_t used3 = 0, capacity3 = 0;
    helper.usage_numbers_with_test(&used3, NULL, &capacity3);
}
458
// Scenario: several arenas share one commit limiter; when one arena hits the
// commit limit, releasing another arena must make its committed space reusable
// so the first arena can allocate again.
static void test_recover_from_commit_limit_hit() {

  if (Settings::new_chunks_are_fully_committed()) {
    return; // This would throw off the commit counting in this test.
  }

  // Test:
  // - Multiple MetaspaceArena allocate (operating under the same commit limiter).
  // - One, while attempting to commit parts of its current chunk on demand,
  //   triggers the limit and cannot commit its chunk further.
  // - We release the other MetaspaceArena - its content is put back to the
  //   freelists.
  // - We re-attempt allocation from the first manager. It should now succeed.
  //
  // This means the first MetaspaceArena may have to let go of its current chunk and
  // retire it and take a fresh chunk from the freelist.

  const size_t commit_limit = Settings::commit_granule_words() * 10;
  MetaspaceGtestContext context(commit_limit);

  // The first MetaspaceArena mimics a micro loader. This will fill the free
  // chunk list with very small chunks. We allocate from them in an interleaved
  // way to cause fragmentation.
  MetaspaceArenaTestHelper helper1(context, Metaspace::ReflectionMetaspaceType, false);
  MetaspaceArenaTestHelper helper2(context, Metaspace::ReflectionMetaspaceType, false);

  // This MetaspaceArena should hit the limit. We use BootMetaspaceType here since
  // it gets a large initial chunk which is committed
  // on demand and we are likely to hit a commit limit while trying to expand it.
  MetaspaceArenaTestHelper helper3(context, Metaspace::BootMetaspaceType, false);

  // Allocate space until we have below two but above one granule left
  size_t allocated_from_1_and_2 = 0;
  while (context.commit_limiter().possible_expansion_words() >= Settings::commit_granule_words() * 2 &&
      allocated_from_1_and_2 < commit_limit) {
    helper1.allocate_from_arena_with_tests_expect_success(1);
    helper2.allocate_from_arena_with_tests_expect_success(1);
    allocated_from_1_and_2 += 2;
  }

  // Now, allocating from helper3, creep up on the limit.
  // (Comma operator: attempt the allocation, then test the returned pointer; the
  //  loop stops on the first failed allocation or after two granules' worth of words.)
  size_t allocated_from_3 = 0;
  MetaWord* p = NULL;
  while ( (helper3.allocate_from_arena_with_tests(&p, 1), p != NULL) &&
         ++allocated_from_3 < Settings::commit_granule_words() * 2);

  EXPECT_LE(allocated_from_3, Settings::commit_granule_words() * 2);

  // We expect the freelist to be empty of committed space...
  EXPECT_0(context.cm().total_committed_word_size());

  //msthelper.cm().print_on(tty);

  // Release the first MetaspaceArena.
  helper1.delete_arena_with_tests();

  //msthelper.cm().print_on(tty);

  // ... and releasing helper1 should have populated the freelist with
  // committed space again.
  EXPECT_GT(context.cm().total_committed_word_size(), (size_t)0);

  // Repeat allocation from helper3, should now work.
  helper3.allocate_from_arena_with_tests_expect_success(1);

}
525
// Driver for the commit-limit recovery scenario (see test_recover_from_commit_limit_hit()).
TEST_VM(metaspace, MetaspaceArena_recover_from_limit_hit) {
  test_recover_from_commit_limit_hit();
}
529
// From a MetaspaceArena in a clean room, allocate tiny amounts and watch the
// arena grow; used/committed/capacity must grow in small controlled steps.
// Parameters:
//   type, is_class               - flavor of the arena under test.
//   expected_starting_capacity   - capacity expected right after the first
//                                  allocation; must agree with the growth policy
//                                  for this space type (arenaGrowthPolicy.cpp).
//   test_in_place_enlargement    - if true, leave the arena undisturbed so its
//                                  chunks can be enlarged in place, and assert at
//                                  the end that such enlargement happened.
static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class,
                                   size_t expected_starting_capacity,
                                   bool test_in_place_enlargement)
{

  if (Settings::use_allocation_guard()) {
    return;
  }

  // From a MetaspaceArena in a clean room allocate tiny amounts;
  // watch it grow. Used/committed/capacity should not grow in
  // large jumps. Also, different types of MetaspaceArena should
  // have different initial capacities.

  MetaspaceGtestContext context;
  MetaspaceArenaTestHelper smhelper(context, type, is_class, "Grower");

  // Side arena used to disturb the main arena's growth (see loop below).
  MetaspaceArenaTestHelper smhelper_harrasser(context, Metaspace::ReflectionMetaspaceType, true, "Harasser");

  size_t used = 0, committed = 0, capacity = 0;
  const size_t alloc_words = 16;

  // A fresh arena must not have claimed any space yet.
  smhelper.arena()->usage_numbers(&used, &committed, &capacity);
  ASSERT_0(used);
  ASSERT_0(committed);
  ASSERT_0(capacity);

  ///// First allocation //

  smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);

  smhelper.arena()->usage_numbers(&used, &committed, &capacity);

  ASSERT_EQ(used, alloc_words);
  ASSERT_GE(committed, used);
  ASSERT_GE(capacity, committed);

  ASSERT_EQ(capacity, expected_starting_capacity);

  if (!(Settings::new_chunks_are_fully_committed() && type == Metaspace::BootMetaspaceType)) {
    // Initial commit charge for the whole context should be one granule
    ASSERT_EQ(context.committed_words(), Settings::commit_granule_words());
    // Initial commit number for the arena should be less since - apart from boot loader - no
    // space type has large initial chunks.
    ASSERT_LE(committed, Settings::commit_granule_words());
  }

  ///// subsequent allocations //

  DEBUG_ONLY(const uintx num_chunk_enlarged = metaspace::InternalStats::num_chunks_enlarged();)

  size_t words_allocated = 0;
  int num_allocated = 0;
  // Upper bound on total words allocated (floating-point product, truncated to size_t).
  const size_t safety = MAX_CHUNK_WORD_SIZE * 1.2;
  size_t highest_capacity_jump = capacity;
  int num_capacity_jumps = 0;

  while (words_allocated < safety && num_capacity_jumps < 15) {

    // if we want to test growth with in-place chunk enlargement, leave MetaspaceArena
    // undisturbed; it will have all the place to grow. Otherwise allocate from a little
    // side arena to increase fragmentation.
    // (Note that this does not completely prevent in-place chunk enlargement but makes it
    //  rather improbable)
    if (!test_in_place_enlargement) {
      smhelper_harrasser.allocate_from_arena_with_tests_expect_success(alloc_words * 2);
    }

    smhelper.allocate_from_arena_with_tests_expect_success(alloc_words);
    words_allocated += metaspace::get_raw_word_size_for_requested_word_size(alloc_words);
    num_allocated++;

    size_t used2 = 0, committed2 = 0, capacity2 = 0;

    smhelper.arena()->usage_numbers(&used2, &committed2, &capacity2);

    // used should not grow larger than what we allocated, plus possible overhead.
    ASSERT_GE(used2, used);
    ASSERT_LE(used2, used + alloc_words * 2);
    ASSERT_LE(used2, words_allocated + 100);
    used = used2;

    // A jump in committed words should not be larger than commit granule size.
    // It can be smaller, since the current chunk of the MetaspaceArena may be
    // smaller than a commit granule.
    // (Note: unless root chunks are born fully committed)
    ASSERT_GE(committed2, used2);
    ASSERT_GE(committed2, committed);
    const size_t committed_jump = committed2 - committed;
    if (committed_jump > 0 && !Settings::new_chunks_are_fully_committed()) {
      ASSERT_LE(committed_jump, Settings::commit_granule_words());
    }
    committed = committed2;

    // Capacity jumps: Test that arenas capacity does not grow too fast.
    ASSERT_GE(capacity2, committed2);
    ASSERT_GE(capacity2, capacity);
    const size_t capacity_jump = capacity2 - capacity;
    if (capacity_jump > 0) {
      LOG(">" SIZE_FORMAT "->" SIZE_FORMAT "(+" SIZE_FORMAT ")", capacity, capacity2, capacity_jump)
      if (capacity_jump > highest_capacity_jump) {
        /* Disabled for now since this is rather shaky. The way it is tested makes it too dependent
         * on allocation history. Need to rethink this.
        ASSERT_LE(capacity_jump, highest_capacity_jump * 2);
        ASSERT_GE(capacity_jump, MIN_CHUNK_WORD_SIZE);
        ASSERT_LE(capacity_jump, MAX_CHUNK_WORD_SIZE);
        */
        highest_capacity_jump = capacity_jump;
      }
      num_capacity_jumps++;
    }

    capacity = capacity2;

  }

  // After all this work, we should see an increase in number of chunk-in-place-enlargements
  // (this especially is vulnerable to regression: the decisions of when to do in-place-enlargements are somewhat
  // complicated, see MetaspaceArena::attempt_enlarge_current_chunk())
#ifdef ASSERT
  if (test_in_place_enlargement) {
    const uintx num_chunk_enlarged_2 = metaspace::InternalStats::num_chunks_enlarged();
    ASSERT_GT(num_chunk_enlarged_2, num_chunk_enlarged);
  }
#endif
}
656
657 // these numbers have to be in sync with arena policy numbers (see memory/metaspace/arenaGrowthPolicy.cpp)
658 TEST_VM(metaspace, MetaspaceArena_growth_refl_c_inplace) {
659 test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
|