 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/binaryTreeDictionary.inline.hpp"
#include "memory/freeList.inline.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaDebug.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceStatistics.hpp"
#include "memory/metaspace/occupancyMap.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

namespace metaspace {

ChunkManager::ChunkManager(bool is_class)
  : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
  _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
  _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
  _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
}
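
// Illustrative usage sketch (not part of the original code; chunk sizes are
// platform- and flag-dependent, and some_word_size is a hypothetical value).
// A ChunkManager keeps one freelist per non-humongous chunk type plus a
// dictionary for humongous chunks:
//
//   ChunkManager cm(false /* is_class */);
//   // With MetaspaceExpand_lock held:
//   Metachunk* c = cm.chunk_freelist_allocate(some_word_size); // may be NULL
//   ...
//   cm.return_single_chunk(c); // hand it back; may trigger coalescing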

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk has been removed from the chunks free list, update counters.
  account_for_removed_chunk(chunk);
}

bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
  assert_lock_strong(MetaspaceExpand_lock);
  assert(chunk != NULL, "invalid chunk pointer");
  // Check for valid merge combinations.
  assert((chunk->get_chunk_type() == SpecializedIndex &&
          (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
         (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
         "Invalid chunk merge combination.");

  const size_t target_chunk_word_size =
      get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());

  // [ prospective merge region )
  MetaWord* const p_merge_region_start =
      (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
  MetaWord* const p_merge_region_end =
      p_merge_region_start + target_chunk_word_size;

  // We need the VirtualSpaceNode containing this chunk and its occupancy map.
  VirtualSpaceNode* const vsn = chunk->container();
  OccupancyMap* const ocmap = vsn->occupancy_map();

  // The prospective chunk merge range must be completely contained by the
  // committed range of the virtual space node.
  if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
    return false;
  }

  // Only attempt to merge this range if a chunk starts at its beginning and a
  // chunk ends at its end. If a chunk (which could only be a humongous chunk)
  // straddles either boundary of the range, we cannot merge.
  if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
    return false;
  }
  if (p_merge_region_end < vsn->top() &&
      !ocmap->chunk_starts_at_address(p_merge_region_end)) {
    return false;
  }

  // Now check if the prospective merge area contains live chunks. If it does we cannot merge.
  if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
    return false;
  }

  // Success! Remove all chunks in this region...
  log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
      (is_class() ? "class space" : "metaspace"),
      p_merge_region_start, p_merge_region_end);

  const int num_chunks_removed =
      remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);

  // ... and create a single new bigger chunk.
  Metachunk* const p_new_chunk =
      ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
  assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
  p_new_chunk->set_origin(origin_merge);

  log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
      (is_class() ? "class space" : "metaspace"),
      p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));

  // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
  ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
  ocmap->set_chunk_starts_at_address(p_merge_region_start, true);

  // Mark chunk as free. Note: it is not necessary to update the in-use
  // information in the occupancy map, because the old chunks were also
  // free, so nothing should have changed.
  p_new_chunk->set_is_tagged_free(true);

  // Add new chunk to its freelist.
  ChunkList* const list = free_chunks(target_chunk_type);
  list->return_chunk_at_head(p_new_chunk);

  // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
  // should not have changed, because the total size of the space is the same).
  _free_chunks_count -= num_chunks_removed;
  _free_chunks_count ++;

  // VirtualSpaceNode::chunk_count does not have to be modified:
  // it means "number of active (non-free) chunks", so merging free chunks
  // should not affect that count.

  // At the end of a chunk merge, run verification tests.
#ifdef ASSERT

  EVERY_NTH(VerifyMetaspaceInterval)
    locked_verify(true);
    vsn->verify(true);
  END_EVERY_NTH

  g_internal_statistics.num_chunk_merges ++;

#endif

  return true;
}
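
// Illustration of a successful merge (a sketch only; the number and sizes of
// the chunks are arbitrary). Free smaller chunks, aligned to the target chunk
// size, are coalesced into one free target-sized chunk:
//
//   before:  | small | small | ... | small |   (all free, all start bits set)
//            ^ p_merge_region_start         ^ p_merge_region_end
//   after:   |            medium           |   (one start bit, tagged free)
//
// The merge is refused if the region is not fully contained in the committed
// part of the node, if a chunk straddles a region boundary, or if any part of
// the region is in use.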

// Remove all chunks in the given area - the chunks are supposed to be free -
// from their corresponding freelists. Mark them as invalid.
// - This does not correct the occupancy map.
// - This does not adjust the counters in ChunkManager.
// - This does not adjust the container count in the containing VirtualSpaceNode.
// Returns the number of chunks removed.
int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
  assert(p != NULL && word_size > 0, "Invalid range.");
  const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
  assert_is_aligned(word_size, smallest_chunk_size);

  Metachunk* const start = (Metachunk*) p;
  const Metachunk* const end = (Metachunk*)(p + word_size);
  Metachunk* cur = start;
  int num_removed = 0;
  while (cur < end) {
    Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
    DEBUG_ONLY(do_verify_chunk(cur));
    assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
    assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
    log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
        (is_class() ? "class space" : "metaspace"),
        cur, cur->word_size() * sizeof(MetaWord));
    cur->remove_sentinel();
    // Note: cannot call ChunkManager::remove_chunk, because that
    // modifies the counters in ChunkManager, which we do not want. So
    // we call remove_chunk on the freelist directly (see also the
    // splitting function which does the same).
    ChunkList* const list = free_chunks(list_index(cur->word_size()));
    list->remove_chunk(cur);
    num_removed ++;
    cur = next;
  }
  return num_removed;
}

// Update internal accounting after a chunk was added
void ChunkManager::account_for_added_chunk(const Metachunk* c) {
  assert_lock_strong(MetaspaceExpand_lock);
  _free_chunks_count ++;
  _free_chunks_total += c->word_size();
}

// Update internal accounting after a chunk was removed
void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
  assert_lock_strong(MetaspaceExpand_lock);
  assert(_free_chunks_count >= 1,
         "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
  assert(_free_chunks_total >= c->word_size(),
         "ChunkManager::_free_chunks_total: about to go negative "
         "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
  _free_chunks_count --;
  _free_chunks_total -= c->word_size();
}

ChunkIndex ChunkManager::list_index(size_t size) {
  return get_chunk_type_by_size(size, is_class());
}

size_t ChunkManager::size_by_index(ChunkIndex index) const {
  index_bounds_check(index);
  assert(index != HumongousIndex, "Do not call for humongous chunks.");
  return get_size_for_nonhumongous_chunktype(index, is_class());
}

#ifdef ASSERT
void ChunkManager::verify(bool slow) const {
  MutexLocker cl(MetaspaceExpand_lock,
                 Mutex::_no_safepoint_check_flag);
  locked_verify(slow);
}

void ChunkManager::locked_verify(bool slow) const {
  log_trace(gc, metaspace, freelist)("verifying %s chunkmanager (%s).",
      (is_class() ? "class space" : "metaspace"), (slow ? "slow" : "quick"));

  assert_lock_strong(MetaspaceExpand_lock);

  size_t chunks_counted = 0;
  size_t wordsize_chunks_counted = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    const ChunkList* list = _free_chunks + i;
    if (list != NULL) {
      Metachunk* chunk = list->head();
      while (chunk) {
        if (slow) {
          do_verify_chunk(chunk);
        }
        assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
        chunks_counted ++;
        wordsize_chunks_counted += chunk->size();
        chunk = chunk->next();
      }
    }
  }

  chunks_counted += humongous_dictionary()->total_free_blocks();
  wordsize_chunks_counted += humongous_dictionary()->total_size();

  assert(chunks_counted == _free_chunks_count && wordsize_chunks_counted == _free_chunks_total,
         "freelist accounting mismatch: "
         "we think: " SIZE_FORMAT " chunks, total " SIZE_FORMAT " words, "
         "reality: " SIZE_FORMAT " chunks, total " SIZE_FORMAT " words.",
         _free_chunks_count, _free_chunks_total,
         chunks_counted, wordsize_chunks_counted);
}
#endif // ASSERT

void ChunkManager::locked_print_free_chunks(outputStream* st) {
  assert_lock_strong(MetaspaceExpand_lock);
  st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
               _free_chunks_total, _free_chunks_count);
}

ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
  assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
         "Bad index: %d", (int)index);
  return &_free_chunks[index];
}

ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
  ChunkIndex index = list_index(word_size);
  assert(index < HumongousIndex, "No humongous list");
  return free_chunks(index);
}

// Helper for chunk splitting: given a target chunk size and a larger free chunk,
// split the larger chunk into n smaller chunks, at least one of which is a chunk
// of the target size. The smaller chunks, including the target chunk, are all
// returned to the freelist. The pointer to the target chunk is returned; note
// that the caller is expected to remove this chunk from the freelist right away.
Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
  assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");

  const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
  const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());

  MetaWord* const region_start = (MetaWord*)larger_chunk;
  const size_t region_word_len = larger_chunk->word_size();
  MetaWord* const region_end = region_start + region_word_len;
  VirtualSpaceNode* const vsn = larger_chunk->container();
  OccupancyMap* const ocmap = vsn->occupancy_map();

  // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
  // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
  // at an address suitable to place the smaller target chunk.
  assert_is_aligned(region_start, target_chunk_word_size);

  // Remove old chunk.
  free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
  larger_chunk->remove_sentinel();

  // Prevent access to the old chunk from here on.
  larger_chunk = NULL;
  // ... and wipe it.
  DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));

  // In its place create first the target chunk...
  MetaWord* p = region_start;
  Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
  assert(target_chunk == (Metachunk*)p, "Sanity");
  target_chunk->set_origin(origin_split);

  // Note: we do not need to mark its start in the occupancy map
  // because it coincides with the old chunk start.

  // Mark chunk as free and return to the freelist.
  do_update_in_use_info_for_chunk(target_chunk, false);
  free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);

  // This chunk should now be valid and can be verified.
  DEBUG_ONLY(do_verify_chunk(target_chunk));

  // In the remaining space create the remainder chunks.
  p += target_chunk->word_size();
  assert(p < region_end, "Sanity");

  while (p < region_end) {

    // Find the largest chunk size which fits the alignment requirements at address p.
    ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
    size_t this_chunk_word_size = 0;
    for (;;) {
      this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
      if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
        break;
      } else {
        this_chunk_index = prev_chunk_index(this_chunk_index);
        assert(this_chunk_index >= target_chunk_index, "Sanity");
      }
    }

    assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
    assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
    assert(p + this_chunk_word_size <= region_end, "Sanity");

    // Create the remainder chunk at p.
    Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
    assert(this_chunk == (Metachunk*)p, "Sanity");
    this_chunk->set_origin(origin_split);
    ocmap->set_chunk_starts_at_address(p, true);
    do_update_in_use_info_for_chunk(this_chunk, false);

    // This chunk should be valid and can be verified.
    DEBUG_ONLY(do_verify_chunk(this_chunk));

    // Return this chunk to freelist and correct counter.
    free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
    _free_chunks_count ++;

    log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
        SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
        p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
        p2i(region_start), p2i(region_end));

    p += this_chunk_word_size;

  }

  // Note: at this point, the VirtualSpaceNode is invalid since we split a chunk and
  // did not yet hand out part of that split; so, vsn->verify_free_chunks_are_ideally_merged()
  // would assert. Instead, do all verifications in the caller.

  DEBUG_ONLY(g_internal_statistics.num_chunk_splits ++);

  return target_chunk;
}
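
// Illustration of a split (a sketch only; the layout depends on the actual
// chunk sizes). Splitting a free medium chunk to get a specialized chunk
// places the target chunk at the region start and fills the tail with the
// largest chunks that still fit their alignment:
//
//   before:  |<------------------ medium ------------------>|
//   after:   |spc|spc|spc|...|  small  |  small  |   ...    |
//             ^ target chunk (returned; the caller removes it
//               from the freelist right away)
//
// All resulting chunks, including the target chunk, first go back onto their
// freelists; only the returned chunk is then taken out again by the caller.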

Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  assert_lock_strong(MetaspaceExpand_lock);

  Metachunk* chunk = NULL;
  bool we_did_split_a_chunk = false;

  if (list_index(word_size) != HumongousIndex) {

    ChunkList* free_list = find_free_chunks_list(word_size);
    assert(free_list != NULL, "Sanity check");

    chunk = free_list->head();

    if (chunk == NULL) {
      // There is no free chunk of the requested size. Split a larger chunk
      // if one exists. This is the counterpart of the coalescing-upon-chunk-return.

      ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());

      // Is there a larger chunk we could split?
      Metachunk* larger_chunk = NULL;
      ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
      while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
        larger_chunk = free_chunks(larger_chunk_index)->head();
        if (larger_chunk == NULL) {
          larger_chunk_index = next_chunk_index(larger_chunk_index);
        }
      }

      if (larger_chunk != NULL) {
        assert(larger_chunk->word_size() > word_size, "Sanity");
        assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");

        // We found a larger chunk. Let's split it up:
        // - remove old chunk
        // - in its place, create new smaller chunks, with at least one chunk
        //   being of target size, the others sized as large as possible. This
        //   is to make sure the resulting chunks are "as coalesced as possible"
        //   (similar to VirtualSpaceNode::retire()).
        // Note: during this operation both ChunkManager and VirtualSpaceNode
        //   are temporarily invalid, so be careful with asserts.

        log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
            ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
            (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
            chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));

        chunk = split_chunk(word_size, larger_chunk);

        // This should have worked.
        assert(chunk != NULL, "Sanity");
        assert(chunk->word_size() == word_size, "Sanity");
        assert(chunk->is_tagged_free(), "Sanity");

        we_did_split_a_chunk = true;

      }
    }

    if (chunk == NULL) {
      return NULL;
    }

    // Remove the chunk as the head of the list.
    free_list->remove_chunk(chunk);

    log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
        p2i(free_list), free_list->count());

  } else {
    chunk = humongous_dictionary()->get_chunk(word_size);

    if (chunk == NULL) {
      return NULL;
    }

    log_trace(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
        chunk->word_size(), word_size, chunk->word_size() - word_size);
  }

  // Chunk has been removed from the chunk manager; update counters.
  account_for_removed_chunk(chunk);
  do_update_in_use_info_for_chunk(chunk, true);
  chunk->container()->inc_container_count();
  chunk->inc_use_count();

  // Remove it from the links to this freelist.
  chunk->set_next(NULL);
  chunk->set_prev(NULL);

  // Run some verifications (some more if we did a chunk split).
#ifdef ASSERT

  EVERY_NTH(VerifyMetaspaceInterval)
    // Be extra verify-y when chunk split happened.
    locked_verify(true);
    VirtualSpaceNode* const vsn = chunk->container();
    vsn->verify(true);
    if (we_did_split_a_chunk) {
      vsn->verify_free_chunks_are_ideally_merged();
    }
  END_EVERY_NTH

  g_internal_statistics.num_chunks_removed_from_freelist ++;

#endif

  return chunk;
}

Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  assert_lock_strong(MetaspaceExpand_lock);

  // Take from the beginning of the list.
  Metachunk* chunk = free_chunks_get(word_size);
  if (chunk == NULL) {
    return NULL;
  }

  assert((word_size <= chunk->word_size()) ||
         (list_index(chunk->word_size()) == HumongousIndex),
         "Non-humongous variable sized chunk");
  LogTarget(Trace, gc, metaspace, freelist) lt;
  if (lt.is_enabled()) {
    size_t list_count;
    if (list_index(word_size) < HumongousIndex) {
      ChunkList* list = find_free_chunks_list(word_size);
      list_count = list->count();
    } else {
      list_count = humongous_dictionary()->total_count();
    }
    LogStream ls(lt);
    ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
             p2i(this), p2i(chunk), chunk->word_size(), list_count);
    ResourceMark rm;
    locked_print_free_chunks(&ls);
  }

  return chunk;
}

void ChunkManager::return_single_chunk(Metachunk* chunk) {

#ifdef ASSERT
  EVERY_NTH(VerifyMetaspaceInterval)
    this->locked_verify(false);
    do_verify_chunk(chunk);
  END_EVERY_NTH
#endif

  const ChunkIndex index = chunk->get_chunk_type();
  assert_lock_strong(MetaspaceExpand_lock);
  DEBUG_ONLY(g_internal_statistics.num_chunks_added_to_freelist ++;)
  assert(chunk != NULL, "Expected chunk.");
  assert(chunk->container() != NULL, "Container should have been set.");
  assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
  index_bounds_check(index);

  // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
  // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
  // keeps tree node pointers in the chunk payload area which mangle will overwrite.
  DEBUG_ONLY(chunk->mangle(badMetaWordVal);)

  // May need the node for verification later, after the chunk may have been merged away.
  DEBUG_ONLY(VirtualSpaceNode* vsn = chunk->container();)

  if (index != HumongousIndex) {
    // Return non-humongous chunk to freelist.
    ChunkList* list = free_chunks(index);
    assert(list->size() == chunk->word_size(), "Wrong chunk type.");
    list->return_chunk_at_head(chunk);
    log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
        chunk_size_name(index), p2i(chunk));
  } else {
    // Return humongous chunk to dictionary.
    assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
    assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
           "Humongous chunk has wrong alignment.");
    _humongous_dictionary.return_chunk(chunk);
    log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
        chunk_size_name(index), p2i(chunk), chunk->word_size());
  }
  chunk->container()->dec_container_count();
  do_update_in_use_info_for_chunk(chunk, false);

  // Chunk has been added; update counters.
  account_for_added_chunk(chunk);

  // Attempt to coalesce the returned chunk with its neighboring chunks:
  // if this chunk is small or specialized, attempt to coalesce it into a medium chunk.
  if (index == SmallIndex || index == SpecializedIndex) {
    if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
      // This did not work. But if this chunk is specialized, we may still be able to form a small chunk.
      if (index == SpecializedIndex) {
        if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
          // give up.
        }
      }
    }
  }

  // From here on do not access chunk anymore, it may have been merged with another chunk.

#ifdef ASSERT
  EVERY_NTH(VerifyMetaspaceInterval)
    this->locked_verify(true);
    vsn->verify(true);
    vsn->verify_free_chunks_are_ideally_merged();
  END_EVERY_NTH
#endif

}
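
// Note on the coalescing cascade above (illustrative): returning a specialized
// chunk first attempts to form a medium chunk around it; only if that fails is
// a small chunk attempted. So in the best case a single return can replace
// many small free neighbors with one larger free chunk:
//
//   return specialized chunk
//     -> attempt_to_coalesce_around_chunk(chunk, MediumIndex)  // failed
//     -> attempt_to_coalesce_around_chunk(chunk, SmallIndex)   // succeeded:
//        the freelist now holds one small chunk instead of n specialized ones.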

void ChunkManager::return_chunk_list(Metachunk* chunks) {
  if (chunks == NULL) {
    return;
  }
  LogTarget(Trace, gc, metaspace, freelist) log;
  if (log.is_enabled()) { // tracing
    log.print("returning list of chunks...");
  }
  unsigned num_chunks_returned = 0;
  size_t size_chunks_returned = 0;
  Metachunk* cur = chunks;
  while (cur != NULL) {
    // Capture the next link before it is changed
    // by the call to return_chunk_at_head();
    Metachunk* next = cur->next();
    if (log.is_enabled()) { // tracing
      num_chunks_returned ++;
      size_chunks_returned += cur->word_size();
    }
    return_single_chunk(cur);
    cur = next;
  }
  if (log.is_enabled()) { // tracing
    log.print("returned %u chunks to freelist, total word size " SIZE_FORMAT ".",
              num_chunks_returned, size_chunks_returned);
  }
}

void ChunkManager::collect_statistics(ChunkManagerStatistics* out) const {
  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    out->chunk_stats(i).add(num_free_chunks(i), size_free_chunks_in_bytes(i) / sizeof(MetaWord));
  }
}

} // namespace metaspace
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "memory/metaspace/constants.hpp"
#include "memory/metaspace/chunkAllocSequence.hpp"
#include "memory/metaspace/chunkLevel.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceStatistics.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

// Return a single chunk to the freelist and adjust accounting. No merge is attempted.
void ChunkManager::return_chunk_simple(Metachunk* c) {

  assert_lock_strong(MetaspaceExpand_lock);

  DEBUG_ONLY(c->verify(false));

  const chklvl_t lvl = c->level();
  _chunks[lvl].add(c);
  _total_word_size.increment_by(c->word_size());
  _committed_word_size.increment_by(c->committed_words());

  // Tracing
  log_debug(metaspace)("ChunkManager %s: returned chunk " PTR_FORMAT ", level " CHKLVL_FORMAT,
                       _name, p2i(c), c->level());

}

// Remove the first chunk from the freelist of the given level and adjust counters.
// Returns NULL if there is no chunk at this level.
Metachunk* ChunkManager::remove_first_chunk_at_level(chklvl_t l) {

  assert_lock_strong(MetaspaceExpand_lock);
  DEBUG_ONLY(chklvl::check_valid_level(l);)

  Metachunk* c = _chunks[l].remove_first();
  if (c != NULL) {
    _total_word_size.decrement_by(c->word_size());
    _committed_word_size.decrement_by(c->committed_words());
  }

  // Tracing
  if (c != NULL) {
    log_debug(metaspace)("ChunkManager %s: removed chunk " PTR_FORMAT ", level " CHKLVL_FORMAT,
                         _name, p2i(c), c->level());
  } else {
    log_trace(metaspace)("ChunkManager %s: no chunk found for level " CHKLVL_FORMAT,
                         _name, l);
  }

  return c;
}

// Creates a chunk manager with a given name (for debugging purposes only) and an
// associated virtual space list from which new chunks will be requested
// (see get_chunk()).
ChunkManager::ChunkManager(const char* name, VirtualSpaceList* space_list)
  : _vslist(space_list),
    _name(name),
    _chunks()
{
}

// Given a chunk we are about to hand out to the caller, make sure it is committed
// according to constants::committed_words_on_fresh_chunks.
bool ChunkManager::commit_chunk_before_handout(Metachunk* c) {
  assert_lock_strong(MetaspaceExpand_lock);
  const size_t must_be_committed = MIN2(c->word_size(), constants::committed_words_on_fresh_chunks);
  return c->ensure_committed_locked(must_be_committed);
}
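
// Illustrative example (the concrete threshold value is not assumed here; it
// comes from constants::committed_words_on_fresh_chunks): for a chunk of,
// say, 64K words and a threshold of 4K words, only
//
//   MIN2(64K, 4K) == 4K
//
// words at the chunk head must be committed before handout; the remaining
// 60K words stay uncommitted until allocation reaches them (see
// Metachunk::allocate).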

#ifdef ASSERT
// Given a splinters array returned from a split operation, check that it meets expectations.
static void check_splinters_array(Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS], chklvl_t min, chklvl_t max) {
  // The array shall contain splinters in the range [min, max] and nothing outside. The chunk levels for
  // the chunks must match too.
  for (chklvl_t l = chklvl::ROOT_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
    if (l >= min && l <= max) {
      assert(splinters[l] != NULL, "Missing splinters");
      assert(splinters[l]->level() == l, "Unexpected level");
      splinters[l]->verify(false);
    } else {
      assert(splinters[l] == NULL, "Unexpected splinters");
    }
  }
}
#endif

// Given a chunk which must be outside of a freelist and must be free, split it to
// meet a target level and return it. Splinters are added to the freelist.
Metachunk* ChunkManager::split_chunk_and_add_splinters(Metachunk* c, chklvl_t target_level) {

  assert_lock_strong(MetaspaceExpand_lock);

  assert(c->is_free() && c->level() < target_level, "Invalid chunk for splitting");
  DEBUG_ONLY(chklvl::check_valid_level(target_level);)

  // Chunk must be outside of our freelists.
  assert(_chunks[c->level()].contains(c) == false, "Chunk is in freelist.");

  log_debug(metaspace)("ChunkManager %s: chunk " PTR_FORMAT ", level " CHKLVL_FORMAT
                       ": split to " CHKLVL_FORMAT ".",
                       _name, p2i(c), c->level(), target_level);

  const chklvl_t orig_level = c->level();
  Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS] = { 0 };
  c = c->vsnode()->split(target_level, c, splinters);

  // Splitting should never fail.
  assert(c != NULL, "Split failed");
  assert(c->level() == target_level, "Sanity");
  DEBUG_ONLY(c->verify(false));
  DEBUG_ONLY(check_splinters_array(splinters, orig_level + 1, target_level);)

  // Return splinters to freelist.
  for (chklvl_t l = orig_level + 1; l <= target_level; l ++) {
    log_trace(metaspace)("ChunkManager %s: returning splinter chunk " PTR_FORMAT ", level " CHKLVL_FORMAT ".",
                         _name, p2i(splinters[l]), l);
    return_chunk_simple(splinters[l]);
  }

  return c;
}
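
// Illustration of a buddy-style split (a sketch; the levels are examples
// only). Splitting a free level-0 (root) chunk down to target level 3 yields
// the result chunk plus one splinter per level in between:
//
//   before:  [-------------------- root chunk, level 0 --------------------]
//   after:   [ c ][spl 3][   spl 2   ][              spl 1                 ]
//
// c is the returned result chunk (level 3); the splinters at levels 1..3 are
// handed back to the freelists via return_chunk_simple().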

// Get a chunk and be smart about it.
// - 1) Attempt to find a free chunk of exactly the preferred level, pref_level.
// - 2) Failing that, attempt to find a smaller free chunk, down to and including min_level.
// - 3) Failing that, attempt to find a larger free chunk and split it.
// - 4) Failing that, attempt to allocate a new chunk from the connected virtual space.
// - Failing that, give up and return NULL.
// Note: this is not guaranteed to return a *committed* chunk. The chunk manager will
//   attempt to commit the returned chunk according to constants::committed_words_on_fresh_chunks;
//   but this may fail if we hit a commit limit. In that case, a partly uncommitted chunk
//   will be returned, and the commit is attempted again when we allocate from the chunk's
//   uncommitted area. See also Metachunk::allocate.
Metachunk* ChunkManager::get_chunk(chklvl_t min_level, chklvl_t pref_level) {

  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  DEBUG_ONLY(chklvl::check_valid_level(min_level);)
  DEBUG_ONLY(chklvl::check_valid_level(pref_level);)

  Metachunk* c = NULL;

  // Tracing
  log_debug(metaspace)("ChunkManager %s: get chunk: min " CHKLVL_FORMAT ", preferred " CHKLVL_FORMAT ".",
                       _name, min_level, pref_level);

  // 1) Attempt to find a free chunk of exactly the pref_level level
  c = remove_first_chunk_at_level(pref_level);

  // 2) Failing that, attempt to find a smaller free chunk, down to and including min_level.
  if (c == NULL) {
    for (chklvl_t lvl = pref_level + 1; lvl <= min_level; lvl ++) {
      c = remove_first_chunk_at_level(lvl);
      if (c != NULL) {
        break;
      }
    }
  }

  // 3) Failing that, attempt to find a free chunk of larger size and split it.
  if (c == NULL) {
    for (chklvl_t lvl = pref_level - 1; lvl >= chklvl::ROOT_CHUNK_LEVEL; lvl --) {
      c = remove_first_chunk_at_level(lvl);
      if (c != NULL) {
        // Split chunk; add splinters to freelist
        c = split_chunk_and_add_splinters(c, pref_level);
        break;
      }
    }
  }

  // 4) Failing that, attempt to allocate a new chunk from the connected virtual space.
  if (c == NULL) {

    // Tracing
    log_debug(metaspace)("ChunkManager %s: need new root chunk.", _name);

    c = _vslist->allocate_root_chunk();

    // This should always work. Note that getting the root chunk may not mean we committed memory.
    assert(c != NULL, "Unexpected");
    assert(c->level() == chklvl::LOWEST_CHUNK_LEVEL, "Not a root chunk?");

    // Split this root chunk to the desired chunk size.
    if (pref_level > c->level()) {
      c = split_chunk_and_add_splinters(c, pref_level);
    }
  }

  // At this point we should always have a chunk. If we hit a commit limit in the
  // meantime, the chunk may still be uncommitted, but the chunk itself should
  // exist now.
  assert(c != NULL, "Unexpected");

  // Before returning the chunk, attempt to commit it according to the handout rules.
  // If that fails, we ignore the error and return the uncommitted chunk.
  if (commit_chunk_before_handout(c) == false) {
    log_info(gc, metaspace)("Failed to commit chunk prior to handout.");
  }

  DEBUG_ONLY(verify_locked(false);)

  log_debug(metaspace)("ChunkManager %s: handing out chunk " PTR_FORMAT ", level " CHKLVL_FORMAT ".",
                       _name, p2i(c), c->level());

  return c;

} // ChunkManager::get_chunk
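
// Worked example for the lookup order in get_chunk() (levels are illustrative
// only; a numerically smaller level means a larger chunk):
//
//   get_chunk(min_level = 5, pref_level = 3)
//     1) try the freelist at level 3                  (exact preference)
//     2) try levels 4, then 5                         (smaller, still acceptable)
//     3) try levels 2, 1, ... down to the root level;
//        on a hit, split down to level 3, keeping the splinters
//     4) allocate a fresh root chunk from the VirtualSpaceList and split it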

// Return a single chunk to the ChunkManager and adjust accounting. May merge the chunk
// with its neighbors.
// This happens after a class loader was unloaded and releases its metaspace chunks.
// !! Note: this may invalidate the chunk. Do not access the chunk after
//    this function returns !!
void ChunkManager::return_chunk(Metachunk* c) {

  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  DEBUG_ONLY(c->verify(false);)

  log_debug(metaspace)("ChunkManager %s: returning chunk " PTR_FORMAT ", level " CHKLVL_FORMAT ".",
                       _name, p2i(c), c->level());

  const chklvl_t orig_lvl = c->level();

  int num_merged[chklvl::NUM_CHUNK_LEVELS] = { 0 };
  Metachunk* c2 = c->vsnode()->merge(c, num_merged);

  if (c2 != NULL) {
    DEBUG_ONLY(c2->verify(false));

    // We did merge chunks and now have a bigger chunk.
    assert(c2->level() < orig_lvl, "Sanity");

    // Adjust counters - the merged-in chunks have been removed from the free lists, but the counters
    // in this chunk manager must be adjusted too.
    size_t size_chunks_removed = 0;
    for (chklvl_t l = chklvl::ROOT_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
      if (num_merged[l] > 0) {
        // Since we have a binary tree, we should see exactly one merge per level.
        assert(num_merged[l] == 1, "sanity");
        _chunks[l].dec_counter_by(1);
        size_chunks_removed += chklvl::word_size_for_level(l);
      }
    }
    _total_word_size.decrement_by(size_chunks_removed);

    c = c2;
  }

  return_chunk_simple(c);

  DEBUG_ONLY(verify_locked(false);)

}
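
// Illustration of the merge bookkeeping above (a sketch; levels are examples
// only). If a returned level-3 chunk can be merged with its free buddy, and
// the resulting level-2 chunk can be merged again, VirtualSpaceNode::merge()
// reports one merged-in chunk per level:
//
//   [ c, lvl 3 ][ buddy, lvl 3 ]  -> one lvl-2 chunk   (num_merged[3] == 1)
//   [    lvl 2    ][ buddy, lvl 2 ] -> one lvl-1 chunk (num_merged[2] == 1)
//
// The merged-in buddies were still counted in _chunks[] and _total_word_size,
// so both are decremented before the single resulting chunk is re-added via
// return_chunk_simple().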

ChunkManager* ChunkManager::_chunkmanager_class = NULL;
ChunkManager* ChunkManager::_chunkmanager_nonclass = NULL;

void ChunkManager::set_chunkmanager_class(ChunkManager* cm) {
  assert(_chunkmanager_class == NULL, "Sanity");
  _chunkmanager_class = cm;
}

void ChunkManager::set_chunkmanager_nonclass(ChunkManager* cm) {
  assert(_chunkmanager_nonclass == NULL, "Sanity");
  _chunkmanager_nonclass = cm;
}

// Update statistics.
void ChunkManager::add_to_statistics(ChunkManagerStatistics* out) const {

  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  for (chklvl_t l = chklvl::ROOT_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) {
    out->num_chunks[l] += _chunks[l].size();
  }

}

#ifdef ASSERT

void ChunkManager::verify(bool slow) const {
  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  verify_locked(slow);
}

void ChunkManager::verify_locked(bool slow) const {

  assert_lock_strong(MetaspaceExpand_lock);

  size_t word_size = 0;
  size_t committed_word_size = 0;
  for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL;
       l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++)
  {
    const Metachunk* c = _chunks[l].first();
    int num = 0;
    while (c) {
      assert(c->level() == l, "Wrong level");
      assert(c->is_free(), "Chunk is not free.");
      num ++;
      committed_word_size += c->committed_words();
      c->verify(slow);
      c = c->next();
    }
    assert(num == _chunks[l].size(), "Sanity");
    word_size += num * chklvl::word_size_for_level(l);
  }
  _total_word_size.check(word_size);
  _committed_word_size.check(committed_word_size);

}

#endif // ASSERT

} // namespace metaspace