#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

DEBUG_ONLY(bool Metaspace::_frozen = false;)

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};
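
// Only specialized, small and medium chunks live on the ChunkManager free
// lists (NumberOfFreeLists == 3); humongous chunks are kept in a separate
// ChunkTreeDictionary, so a SpaceManager tracks one extra in-use list
// (NumberOfInUseLists == 4). Illustrative sketch of the two indexings
// (mirroring declarations elsewhere in this file):
//
//   ChunkList  _free_chunks[NumberOfFreeLists];     // Specialized/Small/MediumIndex
//   Metachunk* _chunks_in_use[NumberOfInUseLists];  // ... plus HumongousIndex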

// Helper, returns a descriptive name for the given index.
static const char* chunk_size_name(ChunkIndex index) {
  switch (index) {
    case SpecializedIndex:
      return "specialized";
    case SmallIndex:
      return "small";
    case MediumIndex:
      return "medium";
    case HumongousIndex:
      return "humongous";
    default:
      return "Invalid index";
  }
}

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};
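
// Worked example (illustrative, assuming 8-byte words on a 64-bit VM): a
// SpecializedChunk of 128 words is 1 KB, a SmallChunk of 512 words is 4 KB,
// and a MediumChunk of 8 * K words is 64 KB. Every size is a multiple of
// SpecializedChunk (512 == 4 * 128, 8 * K == 64 * 128), which is what lets
// VirtualSpaceNode::retire() below carve leftover committed space into free
// chunks with nothing left over.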

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}
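
// Canonical iteration idiom over all in-use chunk lists, as used by
// SpaceManager::initialize() and SpaceManager::verify() later in this file:
//
//   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
//     Metachunk* curr = chunks_in_use(i);
//     // ... walk the NULL-terminated list starting at curr ...
//   }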

static const char* scale_unit(size_t scale) {
  switch(scale) {
    case 1: return "BYTES";
    case K: return "KB";
    case M: return "MB";
    case G: return "GB";
    default:
      ShouldNotReachHere();
      return NULL;
  }
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Return non-humongous chunk list by its index.
  ChunkList* free_chunks(ChunkIndex index);

  // Returns non-humongous chunk list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Returns the humongous chunk dictionary.
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  // Size, in metaspace words, of all chunks managed by this ChunkManager
  size_t _free_chunks_total;
  // Number of chunks in this ChunkManager
  size_t _free_chunks_count;
  // Update counters after a chunk was added or removed.
  void account_for_added_chunk(const Metachunk* c);
  void account_for_removed_chunk(const Metachunk* c);

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

  struct ChunkManagerStatistics {
    size_t num_by_type[NumberOfFreeLists];
    size_t single_size_by_type[NumberOfFreeLists];
    size_t total_size_by_type[NumberOfFreeLists];
    size_t num_humongous_chunks;
    size_t total_size_humongous_chunks;
  };

  void locked_get_statistics(ChunkManagerStatistics* stat) const;
  void get_statistics(ChunkManagerStatistics* stat) const;
  static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }
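
  // Construction sketch (illustrative, mirroring Metaspace::global_initialize()
  // and Metaspace::initialize_class_space() later in this file): one manager
  // for the non-class metaspace and, with compressed class pointers, one for
  // the class space.
  //
  //   ChunkManager* cm       = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  //   ChunkManager* class_cm = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);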

  // add or delete (return) a chunk to the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  ChunkIndex list_index(size_t size);

  // Map a given index to the chunk size.
  size_t size_by_index(ChunkIndex index) const;

  // Take a chunk from the ChunkManager. The chunk is expected to be in
  // the chunk manager (the freelist if non-humongous, the dictionary if
  // humongous).
  void remove_chunk(Metachunk* chunk);

  // Return a single chunk of type index to the ChunkManager.
  void return_single_chunk(ChunkIndex index, Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunk_list(ChunkIndex index, Metachunk* chunk);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  // Remove from a list by size. Selects list based on size of chunk.
 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() const {
    size_t result = dictionary()->total_size();
    if (_small_blocks != NULL) {
      result = result + _small_blocks->total_size();
    }
    return result;
  }

  static size_t min_dictionary_size() { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // address of next available space in _virtual_space;
  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
  void print_map(outputStream* st, bool is_class) const;
};
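
// Lifecycle sketch for a VirtualSpaceNode (illustrative; it mirrors the
// TestVirtualSpaceNodeTest cases at the bottom of this file):
//
//   VirtualSpaceNode vsn(byte_size);                 // reserve the address range
//   if (vsn.initialize()) {                          // set up the VirtualSpace
//     vsn.expand_by(min_words, preferred_words);     // commit part of the reservation
//     Metachunk* c = vsn.get_chunk_vs(SmallChunk);   // carve a chunk off the top
//     vsn.retire(chunk_manager);                     // return leftovers as free chunks
//   }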

#define assert_is_aligned(value, alignment)                  \
  assert(is_aligned((value), (alignment)),                   \
         SIZE_FORMAT_HEX " is not aligned to "               \
         SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))

// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// 'bytes' is the size of the associated virtual space, in bytes.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_aligned(bytes, Metaspace::reserve_alignment());
  bool large_pages = should_commit_large_pages_when_reserving(bytes);
  _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {

  // Format:
  // <ptr>
  // <ptr> . .. . . ..
  // SSxSSMMMMMMMMMMMMMMMMsssXX
  // 112114444444444444444
  // <ptr> . .. . . ..
  // SSxSSMMMMMMMMMMMMMMMMsssXX
  // 112114444444444444444

  if (bottom() == top()) {
    return;
  }

  // First line: dividers for every med-chunk-sized interval
  // Second line: a dot for the start of a chunk
  // Third line: a letter per chunk type (x,s,m,h), uppercase if in use.

  const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
  const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
  const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;

  int line_len = 100;
  const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
  line_len = (int)(section_len / spec_chunk_size);

  char* line1 = (char*)os::malloc(line_len, mtInternal);
  char* line2 = (char*)os::malloc(line_len, mtInternal);
  char* line3 = (char*)os::malloc(line_len, mtInternal);
  int pos = 0;
  const MetaWord* p = bottom();
  const Metachunk* chunk = (const Metachunk*)p;
  const MetaWord* chunk_end = p + chunk->word_size();
  while (p < top()) {
    if (pos == line_len) {
      pos = 0;
      st->fill_to(22);
      st->print_raw(line1, line_len);
      st->cr();
      st->fill_to(22);
      st->print_raw(line2, line_len);
      st->cr();
    }
    if (pos == 0) {
      st->print(PTR_FORMAT ":", p2i(p));
    }
    if (p == chunk_end) {
      chunk = (Metachunk*)p;
      chunk_end = p + chunk->word_size();
    }
    if (p == (const MetaWord*)chunk) {
      // chunk starts.
      line1[pos] = '.';
    } else {
      line1[pos] = ' ';
    }
    // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
    // chunk is in use.
    const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
    if (chunk->word_size() == spec_chunk_size) {
      line2[pos] = chunk_is_free ? 'x' : 'X';
    } else if (chunk->word_size() == small_chunk_size) {
      line2[pos] = chunk_is_free ? 's' : 'S';
    } else if (chunk->word_size() == med_chunk_size) {
      line2[pos] = chunk_is_free ? 'm' : 'M';
    } else if (chunk->word_size() > med_chunk_size) {
      line2[pos] = chunk_is_free ? 'h' : 'H';
    } else {
      ShouldNotReachHere();
    }
    p += spec_chunk_size;
    pos ++;
  }
  if (pos > 0) {
    st->fill_to(22);
    st->print_raw(line1, pos);
    st->cr();
    st->fill_to(22);
    st->print_raw(line2, pos);
    st->cr();
  }
  os::free(line1);
  os::free(line2);
  os::free(line3);
}


#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists. Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
  size_t adjust_initial_chunk_size(size_t requested) const;

  // Get the initial chunk size for this metaspace type.
  size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t chunk_word_size);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);
  // Allocates a block from a small chunk
  MetaWord* get_small_chunk_and_allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size, returns
  // the chunk size to use (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.
  if (unused >= SmallBlocks::small_block_min_size()) {
    return_block(new_block + word_size, unused);
  }

  log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
            p2i(new_block), word_size);
  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  dictionary()->print_free_lists(st);
  if (_small_blocks != NULL) {
    _small_blocks->print_on(st);
  }
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}
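
// The three quantities above obey a simple invariant, since
// bottom() <= top() <= end():
//
//   used_words_in_vs() + free_words_in_vs() == capacity_words_in_vs()
//
// e.g. (illustrative) a node with 8 * K words committed of which 3 * K words
// have been carved into chunks has used == 3 * K and free == 5 * K.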

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging. Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    LogTarget(Debug, gc, metaspace, freelist) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(&ls);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}
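
// Bump-pointer sketch of the above (illustrative): the new chunk occupies
// [old_top, old_top + chunk_word_size) and the placement new writes the
// Metachunk header at its bottom.
//
//   MetaWord* old_top = top();                     // chunk_limit
//   inc_top(chunk_word_size);                      // top() == old_top + chunk_word_size
//   ::new (old_top) Metachunk(chunk_word_size, this);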


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                           (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           "Reserved start was not set properly " PTR_FORMAT
           " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           "Reserved size was not set properly " SIZE_FORMAT
           " != " SIZE_FORMAT, reserved()->word_size(),
           _rs.size() / BytesPerWord);
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr(" space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(vs), capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               p2i(bottom()), p2i(top()), p2i(end()),
               p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk has been removed from the chunks free list, update counters.
  account_for_removed_chunk(chunk);
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  DEBUG_ONLY(verify_container_count();)
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->size_by_index(index);

    while (free_words_in_vs() >= chunk_size) {
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_single_chunk(index, chunk);
    }
    DEBUG_ONLY(verify_container_count();)
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}
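
// Worked example for retire() (illustrative, non-class sizes): with 9 * K
// words of committed-but-unused space, the loop carves one MediumChunk
// (8 * K words), then two SmallChunks (2 * 512 == 1 * K words), and no
// SpecializedChunks, leaving exactly 0 words. The final assert holds because
// every chunk size is a multiple of the smallest one.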

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
                                           size_t min_words,
                                           size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it "
             "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_up(chunk_word_size,              Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}
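
// Worked example (illustrative, assuming 8-byte words and a 4 KB commit
// alignment, i.e. 512 commit-alignment words): requesting one SmallChunk
// (512 words) with a suggested commit granularity of 8 * K words gives
// min_word_size == 512 and preferred_word_size == 8 * K, so
// expand_by(512, 8 * K) is attempted before get_chunk_vs() is retried. For a
// humongous request larger than the suggestion, min_word_size would exceed
// preferred_word_size, which is why it is clamped upwards above.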

void VirtualSpaceList::print_on(outputStream* st) const {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st);

  return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}
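
// Worked example (illustrative numbers): with 60 * M bytes committed, a
// capacity_until_GC of 80 * M and MaxMetaspaceSize of 100 * M,
// left_until_GC == 20 * M and left_until_max == 40 * M, so expansion is
// capped at 20 * M bytes, returned as 20 * M / BytesPerWord words; committing
// more than that first requires raising capacity_until_GC (typically after a
// GC).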

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceAux::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

         sum_free_chunks_count());
}

void ChunkManager::verify_free_chunks_count() {
#ifdef ASSERT
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_count();
#endif
}

void ChunkManager::verify() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  locked_verify();
}

void ChunkManager::locked_verify() {
  locked_verify_free_chunks_count();
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_print_free_chunks(outputStream* st) {
  assert_lock_strong(SpaceManager::expand_lock());
  st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
               _free_chunks_total, _free_chunks_count);
}

void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
  assert_lock_strong(SpaceManager::expand_lock());
  st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
               sum_free_chunks(), sum_free_chunks_count());
}

ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
  assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
         "Bad index: %d", (int)index);

  return &_free_chunks[index];
}
}

ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
  ChunkIndex index = list_index(word_size);
  assert(index < HumongousIndex, "No humongous list");
  return free_chunks(index);
}

Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  slow_locked_verify();

  Metachunk* chunk = NULL;
  if (list_index(word_size) != HumongousIndex) {
    ChunkList* free_list = find_free_chunks_list(word_size);
    assert(free_list != NULL, "Sanity check");

    chunk = free_list->head();

    if (chunk == NULL) {
      return NULL;
    }

    // Remove the chunk as the head of the list.
    free_list->remove_chunk(chunk);

    log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
                                       p2i(free_list), p2i(chunk), chunk->word_size());
  } else {
    chunk = humongous_dictionary()->get_chunk(word_size);

    if (chunk == NULL) {
      return NULL;
    }

    log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
                                    chunk->word_size(), word_size, chunk->word_size() - word_size);
  }

  // Chunk has been removed from the chunk manager; update counters.
  account_for_removed_chunk(chunk);

  // Remove it from the links to this freelist
  chunk->set_next(NULL);
  chunk->set_prev(NULL);

  // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
  // work.
  chunk->set_is_tagged_free(false);
  chunk->container()->inc_container_count();

  slow_locked_verify();
  return chunk;
}

Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  slow_locked_verify();

  // Take from the beginning of the list
  Metachunk* chunk = free_chunks_get(word_size);
  if (chunk == NULL) {
    return NULL;
  }

  assert((word_size <= chunk->word_size()) ||
         (list_index(chunk->word_size()) == HumongousIndex),
         "Non-humongous variable sized chunk");
  LogTarget(Debug, gc, metaspace, freelist) lt;
  if (lt.is_enabled()) {
    size_t list_count;
    if (list_index(word_size) < HumongousIndex) {
      ChunkList* list = find_free_chunks_list(word_size);
      list_count = list->count();
    } else {
      list_count = humongous_dictionary()->total_count();
    }
    LogStream ls(lt);
    ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
             p2i(this), p2i(chunk), chunk->word_size(), list_count);
    ResourceMark rm;
    locked_print_free_chunks(&ls);
  }

  return chunk;
}

void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(chunk != NULL, "Expected chunk.");
  assert(chunk->container() != NULL, "Container should have been set.");
  assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
  index_bounds_check(index);

  // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
  // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
  // keeps tree node pointers in the chunk payload area which mangle will overwrite.
  NOT_PRODUCT(chunk->mangle(badMetaWordVal);)

  if (index != HumongousIndex) {
    // Return non-humongous chunk to freelist.
    ChunkList* list = free_chunks(index);
    assert(list->size() == chunk->word_size(), "Wrong chunk type.");
    list->return_chunk_at_head(chunk);
    log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
        chunk_size_name(index), p2i(chunk));
  } else {
    // Return humongous chunk to dictionary.
    assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
    assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
           "Humongous chunk has wrong alignment.");
    _humongous_dictionary.return_chunk(chunk);
    log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
        chunk_size_name(index), p2i(chunk), chunk->word_size());
  }
  chunk->container()->dec_container_count();
  chunk->set_is_tagged_free(true);

  // Chunk has been added; update counters.
  account_for_added_chunk(chunk);

}

void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
  index_bounds_check(index);
  if (chunks == NULL) {
    return;
  }
  LogTarget(Trace, gc, metaspace, freelist) log;
  if (log.is_enabled()) { // tracing
    log.print("returning list of %s chunks...", chunk_size_name(index));
  }
  unsigned num_chunks_returned = 0;
  size_t size_chunks_returned = 0;
  Metachunk* cur = chunks;
  while (cur != NULL) {
    // Capture the next link before it is changed
    // by the call to return_chunk_at_head();
    Metachunk* next = cur->next();
    if (log.is_enabled()) { // tracing
      num_chunks_returned ++;

void SpaceManager::initialize() {
  Metadebug::init_allocation_fail_alot_count();
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    _chunks_in_use[i] = NULL;
  }
  _current_chunk = NULL;
  log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
}

SpaceManager::~SpaceManager() {
  // This call takes this->_lock, which can't be done while holding expand_lock()
  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
         "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
         " allocated_chunks_words() " SIZE_FORMAT,
         sum_capacity_in_chunks_in_use(), allocated_chunks_words());

  MutexLockerEx fcl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);

  chunk_manager()->slow_locked_verify();

  dec_total_from_size_metrics();

  Log(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
    ResourceMark rm;
    LogStream ls(log.trace());
    locked_print_chunks_in_use_on(&ls);
    if (block_freelists() != NULL) {
      block_freelists()->print_on(&ls);
    }
  }

  // Add all the chunks in use by this space manager
  // to the global list of free chunks.

  // Follow each list of chunks-in-use and add them to the
  // free lists. Each list is NULL terminated.
}

Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);

  if (next == NULL) {
    next = vs_list()->get_new_chunk(chunk_word_size,
                                    medium_chunk_bunch());
  }

  Log(gc, metaspace, alloc) log;
  if (log.is_debug() && next != NULL &&
      SpaceManager::is_humongous(next->word_size())) {
    log.debug("  new humongous chunk word size " SIZE_FORMAT, next->word_size());
  }

  return next;
}

/*
 * The policy is to allocate up to _small_chunk_limit small chunks
 * after which only medium chunks are allocated.  This is done to
 * reduce fragmentation.  In some cases, this can result in a lot
 * of small chunks being allocated to the point where it's not
 * possible to expand.  If this happens, there may be no medium chunks
 * available and OOME would be thrown.  Instead of doing that,
 * if the allocation request size fits in a small chunk, an attempt
 * will be made to allocate a small chunk.
 */
MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
  size_t raw_word_size = get_allocation_word_size(word_size);

  if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
    return NULL;
  }

  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);

  Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());

  MetaWord* mem = NULL;

  if (chunk != NULL) {
    // Add chunk to the in-use chunk list and do an allocation from it.
    // Add to this manager's list of chunks in use.
    add_chunk(chunk, false);
    mem = chunk->allocate(raw_word_size);

    inc_used_metrics(raw_word_size);

    // Track metaspace memory usage statistic.
    track_metaspace_memory_usage();
  }

  return mem;
}
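
// Last-ditch usage sketch (illustrative): Metaspace::allocate() below only
// falls back to this path after a regular allocation and a GC retry have both
// failed, on the theory that even when no medium chunk can be carved out, a
// small chunk may still be sitting on the freelist:
//
//   result = sm->get_small_chunk_and_allocate(word_size);
//   if (result == NULL) {
//     report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
//   }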

MetaWord* SpaceManager::allocate(size_t word_size) {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t raw_word_size = get_allocation_word_size(word_size);
  BlockFreelist* fl = block_freelists();
  MetaWord* p = NULL;
  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size.  Don't allocate
  // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  // for allocations.  Do some profiling.  JJJ
  if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
    p = fl->get_block(raw_word_size);
  }
  if (p == NULL) {
    p = allocate_work(raw_word_size);
  }

  return p;
}
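
// Note on the limit above (illustrative reading): allocation_from_dictionary_limit
// is 4 * K (defined near the top of this file), so the freelist of deallocated
// blocks is only searched once its total_size() exceeds that; until the
// dictionary "gets fat", every request goes straight to allocate_work() and is
// bump-allocated from the current chunk.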

    result = grow_and_allocate(word_size);
  }

  if (result != NULL) {
    inc_used_metrics(word_size);
    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
           "Head of the list is being allocated");
  }

  return result;
}

void SpaceManager::verify() {
  // If there are blocks in the dictionary, then
  // verification of chunks does not work since
  // being in the dictionary alters a chunk.
  if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
      Metachunk* curr = chunks_in_use(i);
      while (curr != NULL) {
        curr->verify();
        verify_chunk_size(curr);
        curr = curr->next();
      }
    }
  }
}

void SpaceManager::verify_chunk_size(Metachunk* chunk) {
  assert(is_humongous(chunk->word_size()) ||
         chunk->word_size() == medium_chunk_size() ||
         chunk->word_size() == small_chunk_size() ||
         chunk->word_size() == specialized_chunk_size(),
         "Chunk size is wrong");
  return;
}

#ifdef ASSERT
void SpaceManager::verify_allocated_blocks_words() {
  // Verification is only guaranteed at a safepoint.
  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
         "Verification can fail if the application is running");
            p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
  if (_class_space_list != NULL) {
    address base = (address)_class_space_list->current_virtual_space()->bottom();
    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
              compressed_class_space_size(), p2i(base));
    if (requested_addr != 0) {
      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
    }
    st->cr();
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, especially with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);

  if (!_class_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
  }
}

#endif

void Metaspace::ergo_initialize() {
  if (DumpSharedSpaces) {
    // Using large pages when dumping the shared archive is currently not implemented.
    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
  }

  size_t page_size = os::vm_page_size();
  if (UseLargePages && UseLargePagesInMetaspace) {
    page_size = os::large_page_size();
  }

  _commit_alignment = page_size;
  }
#endif // _LP64
}

  // Initialize these before initializing the VirtualSpaceList
  _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
  _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
  // Make the first class chunk bigger than a medium chunk so it's not put
  // on the medium chunk list.  The next chunk will be small and progress
  // from there.  This size was calculated by running -version.
  _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                      (CompressedClassSpaceSize/BytesPerWord)*2);
  _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
  // Arbitrarily set the initial virtual space to a multiple
  // of the boot class loader size.
  size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
  word_size = align_up(word_size, Metaspace::reserve_alignment_words());

  // Initialize the list of virtual spaces.
  _space_list = new VirtualSpaceList(word_size);
  _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

  if (!_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
  }

  _tracer = new MetaspaceTracer();
}
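
// Worked sizing example (illustrative, assuming 8-byte words and the 64-bit
// default InitialBootClassLoaderMetaspaceSize of 4 * M): _first_chunk_word_size
// becomes 512 * K words, and _first_class_chunk_word_size is
// MIN2(6 * MediumChunk == 48 * K words, twice CompressedClassSpaceSize in
// words); with the usual 1 G class space the 48 * K-word bound wins, which is
// larger than ClassMediumChunk (4 * K words) and so keeps the first class
// chunk off the medium chunk list, as the comment above intends.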

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
  Metachunk* chunk = get_initialization_chunk(type, mdtype);
  if (chunk != NULL) {
    // Add to this manager's list of chunks in use and current_chunk().
    get_space_manager(mdtype)->add_chunk(chunk, true);
  }
}

         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.

      // Try to clean out some memory and retry.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }
  }

  if (result == NULL) {
    SpaceManager* sm;
    if (is_class_space_allocation(mdtype)) {
      sm = loader_data->metaspace_non_null()->class_vsm();
    } else {
      sm = loader_data->metaspace_non_null()->vsm();
    }

    result = sm->get_small_chunk_and_allocate(word_size);

    if (result == NULL) {
      report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
    }
  }

  // Zero initialize.
  Copy::fill_to_words((HeapWord*)result, word_size, 0);

  return result;
}
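
// Summary of the fallback chain above (illustrative): a metadata allocation
// tries, in order,
//   1. the loader's Metaspace (block freelist or current chunk),
//   2. a GC plus retry (only once bootstrapping is complete),
//   3. a single small chunk via get_small_chunk_and_allocate(), and
//   4. report_metadata_oome(), which reports the failure and throws.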

size_t Metaspace::class_chunk_size(size_t word_size) {
  assert(using_class_space(), "Has to use class space");
  return class_vsm()->calc_chunk_size(word_size);
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  Log(gc, metaspace, freelist) log;
  if (log.is_info()) {
    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
             is_class_space_allocation(mdtype) ? "class" : "data", word_size);

  return get_space_list(NonClassType)->contains(ptr);
}

void Metaspace::verify() {
  vsm()->verify();
  if (using_class_space()) {
    class_vsm()->verify();
  }
}

void Metaspace::dump(outputStream* const out) const {
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
  vsm()->dump(out);
  if (using_class_space()) {
    out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
    class_vsm()->dump(out);
  }
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class TestMetaspaceAuxTest : AllStatic {
 public:
  static void test_reserved() {
    size_t reserved = MetaspaceAux::reserved_bytes();

    assert(reserved > 0, "assert");

    size_t committed = MetaspaceAux::committed_bytes();
    assert(committed <= reserved, "assert");

    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
    assert(reserved_metadata > 0, "assert");
    assert(reserved_metadata <= reserved, "assert");

    if (UseCompressedClassPointers) {
      size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
    words_left = words_left % MediumChunk;

    num_small_chunks = words_left / SmallChunk;
    words_left = words_left % SmallChunk;
    // how many specialized chunks can we get?
    num_specialized_chunks = words_left / SpecializedChunk;
    assert(words_left % SpecializedChunk == 0, "should be nothing left");
  }

 public:
  static void test() {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    const size_t vsn_test_size_words = MediumChunk * 4;
    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;

    // The chunk sizes must be multiples of each other, or this will fail
    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);

    { // No committed memory in VSN
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
    }

    { // All of VSN is committed, half is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
      vsn.get_chunk_vs(MediumChunk);
      vsn.get_chunk_vs(MediumChunk);
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
    }

    const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
    // This doesn't work for systems with vm_page_size >= 16K.
    if (page_chunks < MediumChunk) {
      // 4 pages of VSN is committed, some is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);

      vsn.initialize();
      vsn.expand_by(page_chunks, page_chunks);
      vsn.get_chunk_vs(SmallChunk);
      vsn.get_chunk_vs(SpecializedChunk);
      vsn.retire(&cm);

      // committed - used = words left to retire
      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;

      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

    { // Half of VSN is committed, a humongous chunk is used
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
      vsn.retire(&cm);

      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

  }
4253
4254 #define assert_is_available_positive(word_size) \
4255 assert(vsn.is_available(word_size), \
4256 #word_size ": " PTR_FORMAT " bytes were not available in " \
4257 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
4258 (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
4259
4260 #define assert_is_available_negative(word_size) \
4261 assert(!vsn.is_available(word_size), \
4262 #word_size ": " PTR_FORMAT " bytes should not be available in " \
4263 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
4264 (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
4265
4266 static void test_is_available_positive() {
4267 // Reserve some memory.
4268 VirtualSpaceNode vsn(os::vm_allocation_granularity());
4269 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4270
4271 // Commit some memory.
4272 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4273 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4274 assert(expanded, "Failed to commit");
4275
4276 // Check that is_available accepts the committed size.
4277 assert_is_available_positive(commit_word_size);
4278
4279 // Check that is_available accepts half the committed size.
4280 size_t expand_word_size = commit_word_size / 2;
4281 assert_is_available_positive(expand_word_size);
4282 }
4283
4284 static void test_is_available_negative() {
4285 // Reserve some memory.
4286 VirtualSpaceNode vsn(os::vm_allocation_granularity());
4287 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4288
4289 // Commit some memory.
4290 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4291 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4292 assert(expanded, "Failed to commit");
4293
4294 // Check that is_available doesn't accept a too large size.
4295 size_t two_times_commit_word_size = commit_word_size * 2;
4296 assert_is_available_negative(two_times_commit_word_size);
4297 }
4298
4299 static void test_is_available_overflow() {
4300 // Reserve some memory.
4301 VirtualSpaceNode vsn(os::vm_allocation_granularity());
4302 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4303
4304 // Commit some memory.
4305 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4306 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4307 assert(expanded, "Failed to commit");
4308
4309 // Calculate a size that will overflow the virtual space size.
4310 void* virtual_space_max = (void*)(uintptr_t)-1;
4311 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
4312 size_t overflow_size = bottom_to_max + BytesPerWord;
4313 size_t overflow_word_size = overflow_size / BytesPerWord;
4314
4315 // Check that is_available can handle the overflow.
4316 assert_is_available_negative(overflow_word_size);
4317 }
4318
4319 static void test_is_available() {
4320 TestVirtualSpaceNodeTest::test_is_available_positive();
4321 TestVirtualSpaceNodeTest::test_is_available_negative();
4322 TestVirtualSpaceNodeTest::test_is_available_overflow();
4323 }
4324 };
4325
4326 void TestVirtualSpaceNode_test() {
4327 TestVirtualSpaceNodeTest::test();
4328 TestVirtualSpaceNodeTest::test_is_available();
4329 }
4330
4331 // The following test is placed here instead of a gtest / unittest file
4332 // because the ChunkManager class is only available in this file.
4333 void ChunkManager_test_list_index() {
4334 ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
4335
4336 // Test previous bug where a query for a humongous class metachunk,
4337 // incorrectly matched the non-class medium metachunk size.
4338 {
4339 assert(MediumChunk > ClassMediumChunk, "Precondition for test");
4340
4341 ChunkIndex index = manager.list_index(MediumChunk);
4342
4343 assert(index == HumongousIndex,
4344 "Requested size is larger than ClassMediumChunk,"
4345 " so should return HumongousIndex. Got index: %d", (int)index);
4346 }
4347
4348 // Check the specified sizes as well.
4349 {
4350 ChunkIndex index = manager.list_index(ClassSpecializedChunk);
4351 assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
4352 }
4353 {
4354 ChunkIndex index = manager.list_index(ClassSmallChunk);
4355 assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
4356 }
4357 {
4358 ChunkIndex index = manager.list_index(ClassMediumChunk);
4359 assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
4360 }
4361 {
4362 ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
4363 assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
4364 }
4365 }
4366
4367 #endif // !PRODUCT
4368
4369 #ifdef ASSERT
4370
4371 // ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
4372 // returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
4373 // content.
4374 class ChunkManagerReturnTestImpl : public CHeapObj<mtClass> {
4375
4376 VirtualSpaceNode _vsn;
4377 ChunkManager _cm;
4378
4379 // The expected content of the chunk manager.
4380 unsigned _chunks_in_chunkmanager;
4381 size_t _words_in_chunkmanager;
4382
4383 // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
4384 static const int num_chunks = 256;
4385 Metachunk* _pool[num_chunks];
4386
4387 // Helper, return a random position into the chunk pool.
4388 static int get_random_position() {
4389 return os::random() % num_chunks;
4390 }
4391
4392 // Asserts that ChunkManager counters match expectations.
4393 void assert_counters() {
4394 assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
4395 assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
4396 assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
4397 }
4398
4399 // Get a random chunk size. Equal chance to get spec/med/small chunk size or
4400 // a humongous chunk size. The latter itself is random in the range of [med+spec..4*med).
4401 size_t get_random_chunk_size() {
4402 const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
4403 const int rand = os::random() % 4;
4404 if (rand < 3) {
4405 return sizes[rand];
4406 } else {
4407 // Note: this affects the max. size of space (see _vsn initialization in ctor).
4408 return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
4409 }
4410 }
4411
4412 // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
4413 // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
4414 int next_matching_chunk(int start, bool is_free) const {
4415 assert(start >= 0 && start < num_chunks, "invalid parameter");
4416 int pos = start;
4417 do {
4418 if (++pos == num_chunks) {
4419 pos = 0;
4420 }
4421 if (_pool[pos]->is_tagged_free() == is_free) {
4422 return pos;
4423 }
4424 } while (pos != start);
4425 return -1;
4426 }
4427
4428 // A structure to keep information about a chunk list including which
4429 // chunks are part of this list. This is needed to keep information about a chunk list
4430   // we are about to return to the ChunkManager, because the original list will be destroyed.
4431 struct AChunkList {
4432 Metachunk* head;
4433 Metachunk* all[num_chunks];
4434 size_t size;
4435 int num;
4436 ChunkIndex index;
4437 };
4438
4439 // Assemble, from the in-use chunks (not in the chunk manager) in the pool,
4440 // a random chunk list of max. length <list_size> of chunks with the same
4441 // ChunkIndex (chunk size).
4442 // Returns false if list cannot be assembled. List is returned in the <out>
4443 // structure. Returned list may be smaller than <list_size>.
4444 bool assemble_random_chunklist(AChunkList* out, int list_size) {
4445 // Choose a random in-use chunk from the pool...
4446 const int headpos = next_matching_chunk(get_random_position(), false);
4447 if (headpos == -1) {
4448 return false;
4449 }
4450 Metachunk* const head = _pool[headpos];
4451 out->all[0] = head;
4452 assert(head->is_tagged_free() == false, "Chunk state mismatch");
4453     // ..then, starting from there, chain together up to list_size - 1 other
4454 // in-use chunks of the same index.
4455 const ChunkIndex index = _cm.list_index(head->word_size());
4456 int num_added = 1;
4457 size_t size_added = head->word_size();
4458 int pos = headpos;
4459 Metachunk* tail = head;
4460 do {
4461 pos = next_matching_chunk(pos, false);
4462 if (pos != headpos) {
4463 Metachunk* c = _pool[pos];
4464 assert(c->is_tagged_free() == false, "Chunk state mismatch");
4465 if (index == _cm.list_index(c->word_size())) {
4466 tail->set_next(c);
4467 c->set_prev(tail);
4468 tail = c;
4469 out->all[num_added] = c;
4470 num_added ++;
4471 size_added += c->word_size();
4472 }
4473 }
4474 } while (num_added < list_size && pos != headpos);
4475 out->head = head;
4476 out->index = index;
4477 out->size = size_added;
4478 out->num = num_added;
4479 return true;
4480 }
4481
4482 // Take a single random chunk from the ChunkManager.
4483 bool take_single_random_chunk_from_chunkmanager() {
4484 assert_counters();
4485 _cm.locked_verify();
4486 int pos = next_matching_chunk(get_random_position(), true);
4487 if (pos == -1) {
4488 return false;
4489 }
4490 Metachunk* c = _pool[pos];
4491 assert(c->is_tagged_free(), "Chunk state mismatch");
4492 // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
4493 // ChunkManager::free_chunks_get() with this chunk's word size. We really want
4494 // to exercise ChunkManager::free_chunks_get() because that one gets called for
4495 // normal chunk allocation.
4496 Metachunk* c2 = _cm.free_chunks_get(c->word_size());
4497 assert(c2 != NULL, "Unexpected.");
4498 assert(!c2->is_tagged_free(), "Chunk state mismatch");
4499 assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
4500 _chunks_in_chunkmanager --;
4501 _words_in_chunkmanager -= c->word_size();
4502 assert_counters();
4503 _cm.locked_verify();
4504 return true;
4505 }
4506
4507 // Returns a single random chunk to the chunk manager. Returns false if that
4508 // was not possible (all chunks are already in the chunk manager).
4509 bool return_single_random_chunk_to_chunkmanager() {
4510 assert_counters();
4511 _cm.locked_verify();
4512 int pos = next_matching_chunk(get_random_position(), false);
4513 if (pos == -1) {
4514 return false;
4515 }
4516 Metachunk* c = _pool[pos];
4517 assert(c->is_tagged_free() == false, "wrong chunk information");
4518 _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
4519 _chunks_in_chunkmanager ++;
4520 _words_in_chunkmanager += c->word_size();
4521 assert(c->is_tagged_free() == true, "wrong chunk information");
4522 assert_counters();
4523 _cm.locked_verify();
4524 return true;
4525 }
4526
4527 // Return a random chunk list to the chunk manager. Returns the length of the
4528 // returned list.
4529 int return_random_chunk_list_to_chunkmanager(int list_size) {
4530 assert_counters();
4531 _cm.locked_verify();
4532 AChunkList aChunkList;
4533 if (!assemble_random_chunklist(&aChunkList, list_size)) {
4534 return 0;
4535 }
4536     // Before the chunks are returned, they should be tagged as in use.
4537 for (int i = 0; i < aChunkList.num; i ++) {
4538 assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4539 }
4540 _cm.return_chunk_list(aChunkList.index, aChunkList.head);
4541 _chunks_in_chunkmanager += aChunkList.num;
4542 _words_in_chunkmanager += aChunkList.size;
4543 // After all chunks are returned, check that they are now tagged free.
4544 for (int i = 0; i < aChunkList.num; i ++) {
4545 assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
4546 }
4547 assert_counters();
4548 _cm.locked_verify();
4549 return aChunkList.num;
4550 }
4551
4552 public:
4553
4554 ChunkManagerReturnTestImpl()
4555 : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
4556 , _cm(SpecializedChunk, SmallChunk, MediumChunk)
4557 , _chunks_in_chunkmanager(0)
4558 , _words_in_chunkmanager(0)
4559 {
4560 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4561 // Allocate virtual space and allocate random chunks. Keep these chunks in the _pool. These chunks are
4562 // "in use", because not yet added to any chunk manager.
4563 _vsn.initialize();
4564 _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
4565 for (int i = 0; i < num_chunks; i ++) {
4566 const size_t size = get_random_chunk_size();
4567 _pool[i] = _vsn.get_chunk_vs(size);
4568 assert(_pool[i] != NULL, "allocation failed");
4569 }
4570 assert_counters();
4571 _cm.locked_verify();
4572 }
4573
4574 // Test entry point.
4575 // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
4576   // Chunks are chosen randomly. The number of chunks to return or take is chosen randomly, but is affected
4577   // by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly alternate between
4578   // returning and taking, whereas a factor of 1.0 will take/return all chunks from/to the
4579   // chunk manager, thereby emptying or filling it completely.
4580 void do_test(float phase_length_factor) {
4581 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
4582 assert_counters();
4583     // Execute n operations, an operation being the move of a single chunk to/from the chunk manager.
4584 const int num_max_ops = num_chunks * 100;
4585 int num_ops = num_max_ops;
4586 const int average_phase_length = (int)(phase_length_factor * num_chunks);
4587 int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4588 bool return_phase = true;
4589 while (num_ops > 0) {
4590 int chunks_moved = 0;
4591 if (return_phase) {
4592 // Randomly switch between returning a single chunk or a random length chunk list.
4593 if (os::random() % 2 == 0) {
4594 if (return_single_random_chunk_to_chunkmanager()) {
4595 chunks_moved = 1;
4596 }
4597 } else {
4598 const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
4599 chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
4600 }
4601 } else {
4602         // Breathe out.
4603 if (take_single_random_chunk_from_chunkmanager()) {
4604 chunks_moved = 1;
4605 }
4606 }
4607 num_ops -= chunks_moved;
4608 num_ops_until_switch -= chunks_moved;
4609 if (chunks_moved == 0 || num_ops_until_switch <= 0) {
4610 return_phase = !return_phase;
4611 num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
4612 }
4613 }
4614 }
4615 };
4616
4617 void* setup_chunkmanager_returntests() {
4618 ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl();
4619 return p;
4620 }
4621
4622 void teardown_chunkmanager_returntests(void* p) {
4623 delete (ChunkManagerReturnTestImpl*) p;
4624 }
4625
4626 void run_chunkmanager_returntests(void* p, float phase_length) {
4627 ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p;
4628 test->do_test(phase_length);
4629 }
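// A minimal usage sketch for the three hooks above (editor's illustration,
// not part of the VM sources). A gtest-style driver could exercise both
// quick alternation and full drain/fill:
//
//   void* t = setup_chunkmanager_returntests();
//   const float factors[] = { 0.0f, 0.5f, 1.0f };
//   for (int i = 0; i < 3; i++) {
//     run_chunkmanager_returntests(t, factors[i]);
//   }
//   teardown_chunkmanager_returntests(t);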
4630
4631 // The following test is placed here instead of a gtest / unittest file
4632 // because the ChunkManager class is only available in this file.
4633 class SpaceManagerTest : AllStatic {
4634 friend void SpaceManager_test_adjust_initial_chunk_size();
4635
4636 static void test_adjust_initial_chunk_size(bool is_class) {
4637 const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
4638 const size_t normal = SpaceManager::small_chunk_size(is_class);
4639 const size_t medium = SpaceManager::medium_chunk_size(is_class);
4640
4641 #define test_adjust_initial_chunk_size(value, expected, is_class_value) \
4642 do { \
4643 size_t v = value; \
4644 size_t e = expected; \
4645 assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e, \
4646 "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v); \
4647 } while (0)
4648
4649 // Smallest (specialized)
4650 test_adjust_initial_chunk_size(1, smallest, is_class);
4661 test_adjust_initial_chunk_size(medium - 1, medium, is_class);
4662 test_adjust_initial_chunk_size(medium, medium, is_class);
4663
4664 // Humongous
4665 test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
4666
4667 #undef test_adjust_initial_chunk_size
4668 }
4669
4670 static void test_adjust_initial_chunk_size() {
4671 test_adjust_initial_chunk_size(false);
4672 test_adjust_initial_chunk_size(true);
4673 }
4674 };
4675
4676 void SpaceManager_test_adjust_initial_chunk_size() {
4677 SpaceManagerTest::test_adjust_initial_chunk_size();
4678 }
4679
4680 #endif // ASSERT
39 #include "memory/metaspaceTracer.hpp"
40 #include "memory/resourceArea.hpp"
41 #include "memory/universe.hpp"
42 #include "runtime/atomic.hpp"
43 #include "runtime/globals.hpp"
44 #include "runtime/init.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/mutex.hpp"
47 #include "runtime/orderAccess.inline.hpp"
48 #include "services/memTracker.hpp"
49 #include "services/memoryService.hpp"
50 #include "utilities/align.hpp"
51 #include "utilities/copy.hpp"
52 #include "utilities/debug.hpp"
53 #include "utilities/macros.hpp"
54
55 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
56 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
57
58 // Set this constant to enable slow integrity checking of the free chunk lists
59 const bool metaspace_slow_verify = DEBUG_ONLY(true) NOT_DEBUG(false);
60
61 // Helper function that does a bunch of checks for a chunk.
62 DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
63
64 // Given a Metachunk, update its in-use information (both in the
65 // chunk and the occupancy map).
66 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
67
68 size_t const allocation_from_dictionary_limit = 4 * K;
69
70 MetaWord* last_allocated = 0;
71
72 size_t Metaspace::_compressed_class_space_size;
73 const MetaspaceTracer* Metaspace::_tracer = NULL;
74
75 DEBUG_ONLY(bool Metaspace::_frozen = false;)
76
77 enum ChunkSizes { // in words.
78 ClassSpecializedChunk = 128,
79 SpecializedChunk = 128,
80 ClassSmallChunk = 256,
81 SmallChunk = 512,
82 ClassMediumChunk = 4 * K,
83 MediumChunk = 8 * K
84 };
85
86 // Returns size of this chunk type.
87 size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
88 assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
89 size_t size = 0;
90 if (is_class) {
91 switch(chunktype) {
92 case SpecializedIndex: size = ClassSpecializedChunk; break;
93 case SmallIndex: size = ClassSmallChunk; break;
94 case MediumIndex: size = ClassMediumChunk; break;
95 default:
96 ShouldNotReachHere();
97 }
98 } else {
99 switch(chunktype) {
100 case SpecializedIndex: size = SpecializedChunk; break;
101 case SmallIndex: size = SmallChunk; break;
102 case MediumIndex: size = MediumChunk; break;
103 default:
104 ShouldNotReachHere();
105 }
106 }
107 return size;
108 }
109
110 ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
111 if (is_class) {
112 if (size == ClassSpecializedChunk) {
113 return SpecializedIndex;
114 } else if (size == ClassSmallChunk) {
115 return SmallIndex;
116 } else if (size == ClassMediumChunk) {
117 return MediumIndex;
118 } else if (size > ClassMediumChunk) {
119 assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
120 return HumongousIndex;
121 }
122 } else {
123 if (size == SpecializedChunk) {
124 return SpecializedIndex;
125 } else if (size == SmallChunk) {
126 return SmallIndex;
127 } else if (size == MediumChunk) {
128 return MediumIndex;
129 } else if (size > MediumChunk) {
130 assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
131 return HumongousIndex;
132 }
133 }
134 ShouldNotReachHere();
135 return (ChunkIndex)-1;
136 }
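// For the three fixed (non-humongous) sizes, get_chunk_type_by_size() is the
// inverse of get_size_for_nonhumongous_chunktype(). A sketch of the invariant
// (editor's illustration):
//
//   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
//     const size_t s = get_size_for_nonhumongous_chunktype(i, /*is_class=*/true);
//     assert(get_chunk_type_by_size(s, true) == i, "round trip");
//   }
//
// Any larger, properly aligned size maps to HumongousIndex.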
137
138
139 static ChunkIndex next_chunk_index(ChunkIndex i) {
140 assert(i < NumberOfInUseLists, "Out of bound");
141 return (ChunkIndex) (i+1);
142 }
143
144 static ChunkIndex prev_chunk_index(ChunkIndex i) {
145 assert(i > ZeroIndex, "Out of bound");
146 return (ChunkIndex) (i-1);
147 }
148
149 static const char* scale_unit(size_t scale) {
150 switch(scale) {
151 case 1: return "BYTES";
152 case K: return "KB";
153 case M: return "MB";
154 case G: return "GB";
155 default:
156 ShouldNotReachHere();
157 return NULL;
158 }
159 }
160
161 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
162 uint MetaspaceGC::_shrink_factor = 0;
163 bool MetaspaceGC::_should_concurrent_collect = false;
164
165 typedef class FreeList<Metachunk> ChunkList;
166
167 // Manages the global free lists of chunks.
168 class ChunkManager : public CHeapObj<mtInternal> {
169 friend class TestVirtualSpaceNodeTest;
170
171 // Free list of chunks of different sizes.
172 // SpecializedChunk
173 // SmallChunk
174 // MediumChunk
175 ChunkList _free_chunks[NumberOfFreeLists];
176
177 // Whether or not this is the class chunkmanager.
178 const bool _is_class;
179
180 // Return non-humongous chunk list by its index.
181 ChunkList* free_chunks(ChunkIndex index);
182
183 // Returns non-humongous chunk list for the given chunk word size.
184 ChunkList* find_free_chunks_list(size_t word_size);
185
186 // HumongousChunk
187 ChunkTreeDictionary _humongous_dictionary;
188
189 // Returns the humongous chunk dictionary.
190 ChunkTreeDictionary* humongous_dictionary() {
191 return &_humongous_dictionary;
192 }
193
194 // Size, in metaspace words, of all chunks managed by this ChunkManager
195 size_t _free_chunks_total;
196 // Number of chunks in this ChunkManager
197 size_t _free_chunks_count;
198
199   // Update counters after a chunk was added or removed.
202
203 // Debug support
204
205 size_t sum_free_chunks();
206 size_t sum_free_chunks_count();
207
208 void locked_verify_free_chunks_total();
209 void slow_locked_verify_free_chunks_total() {
210 if (metaspace_slow_verify) {
211 locked_verify_free_chunks_total();
212 }
213 }
214 void locked_verify_free_chunks_count();
215 void slow_locked_verify_free_chunks_count() {
216 if (metaspace_slow_verify) {
217 locked_verify_free_chunks_count();
218 }
219 }
220 void verify_free_chunks_count();
221
222 // Given a pointer to a chunk, attempts to merge it with neighboring
223 // free chunks to form a bigger chunk. Returns true if successful.
224 bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
225
226   // Helper for chunk coalescing:
227 // Given an address range with 1-n chunks which are all supposed to be
228 // free and hence currently managed by this ChunkManager, remove them
229 // from this ChunkManager and mark them as invalid.
230 // - This does not correct the occupancy map.
231 // - This does not adjust the counters in ChunkManager.
232   // - Does not adjust the container count in the containing VirtualSpaceNode.
233 // Returns number of chunks removed.
234 int remove_chunks_in_area(MetaWord* p, size_t word_size);
235
236 public:
237
238 struct ChunkManagerStatistics {
239 size_t num_by_type[NumberOfFreeLists];
240 size_t single_size_by_type[NumberOfFreeLists];
241 size_t total_size_by_type[NumberOfFreeLists];
242 size_t num_humongous_chunks;
243 size_t total_size_humongous_chunks;
244 };
245
246 void locked_get_statistics(ChunkManagerStatistics* stat) const;
247 void get_statistics(ChunkManagerStatistics* stat) const;
248 static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
249
250
251 ChunkManager(bool is_class)
252 : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
253 _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
254 _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
255 _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
256 }
257
258   // Allocate a chunk from the global freelist.
259 Metachunk* chunk_freelist_allocate(size_t word_size);
260
261 // Map a size to a list index assuming that there are lists
262 // for special, small, medium, and humongous chunks.
263 ChunkIndex list_index(size_t size);
264
265 // Map a given index to the chunk size.
266 size_t size_by_index(ChunkIndex index) const;
267
268 bool is_class() const { return _is_class; }
269
270 // Convenience accessors.
271 size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
272 size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
273 size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
274
275 // Take a chunk from the ChunkManager. The chunk is expected to be in
276 // the chunk manager (the freelist if non-humongous, the dictionary if
277 // humongous).
278 void remove_chunk(Metachunk* chunk);
279
280 // Return a single chunk of type index to the ChunkManager.
281 void return_single_chunk(ChunkIndex index, Metachunk* chunk);
282
283 // Add the simple linked list of chunks to the freelist of chunks
284 // of type index.
285 void return_chunk_list(ChunkIndex index, Metachunk* chunk);
286
287 // Total of the space in the free chunks list
288 size_t free_chunks_total_words();
289 size_t free_chunks_total_bytes();
290
291 // Number of chunks in the free chunks list
292 size_t free_chunks_count();
293
294 // Remove from a list by size. Selects list based on size of chunk.
437 public:
438 BlockFreelist();
439 ~BlockFreelist();
440
441 // Get and return a block to the free list
442 MetaWord* get_block(size_t word_size);
443 void return_block(MetaWord* p, size_t word_size);
444
445 size_t total_size() const {
446 size_t result = dictionary()->total_size();
447 if (_small_blocks != NULL) {
448 result = result + _small_blocks->total_size();
449 }
450 return result;
451 }
452
453 static size_t min_dictionary_size() { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
454 void print_on(outputStream* st) const;
455 };
456
457 // Helper for the occupancy bitmap: a type trait yielding an all-bits-set unsigned constant.
458 template <typename T> struct all_ones { static const T value; };
459 template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
460 template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
461
462 // The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
463 // keeps information about
464 // - where a chunk starts
465 // - whether a chunk is in-use or free
466 // A bit in this bitmap represents one range of memory in the smallest
467 // chunk size (SpecializedChunk or ClassSpecializedChunk).
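// For example (editor's sketch, non-class case with SpecializedChunk = 128
// words): bit n of either layer covers the words [n * 128, (n + 1) * 128)
// relative to the reference address, so an address p maps to bit
// (p - reference_address) / 128. A medium chunk of 8 * K words thus spans
// exactly 64 bits, which the 32/64-bit fast paths below rely on.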
468 class OccupancyMap : public CHeapObj<mtInternal> {
469
470 // The address range this map covers.
471 const MetaWord* const _reference_address;
472 const size_t _word_size;
473
474 // The word size of a specialized chunk, aka the number of words one
475 // bit in this map represents.
476 const size_t _smallest_chunk_word_size;
477
478 // map data
479 // Data are organized in two bit layers:
480 // The first layer is the chunk-start-map. Here, a bit is set to mark
481 // the corresponding region as the head of a chunk.
482 // The second layer is the in-use-map. Here, a set bit indicates that
483   // the corresponding region belongs to a chunk which is in use.
484 uint8_t* _map[2];
485
486 enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
487
488 // length, in bytes, of bitmap data
489 size_t _map_size;
490
491 // Returns true if bit at position pos at bit-layer layer is set.
492 bool get_bit_at_position(unsigned pos, unsigned layer) const {
493 assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
494 const unsigned byteoffset = pos / 8;
495 assert(byteoffset < _map_size,
496 "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
497 const unsigned mask = 1 << (pos % 8);
498 return (_map[layer][byteoffset] & mask) > 0;
499 }
500
501 // Changes bit at position pos at bit-layer layer to value v.
502 void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
503 assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
504 const unsigned byteoffset = pos / 8;
505 assert(byteoffset < _map_size,
506 "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
507 const unsigned mask = 1 << (pos % 8);
508 if (v) {
509 _map[layer][byteoffset] |= mask;
510 } else {
511 _map[layer][byteoffset] &= ~mask;
512 }
513 }
514
515 // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
516 // pos is 32/64 aligned and num_bits is 32/64.
517 // This is the typical case when coalescing to medium chunks, whose size is
518   // 32 or 64 times the specialized chunk size (depending on class or non-class
519   // case), so they occupy 32 or 64 bits, which are correspondingly aligned
520   // because chunks are chunk-size aligned.
521 template <typename T>
522 bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
523 assert(_map_size > 0, "not initialized");
524 assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
525 assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
526 assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
527 const size_t byteoffset = pos / 8;
528 assert(byteoffset <= (_map_size - sizeof(T)),
529 "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
530 const T w = *(T*)(_map[layer] + byteoffset);
531     return w != 0;
532 }
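  // Example for the fast path above (editor's sketch): in non-class metaspace
  // a medium chunk covers MediumChunk / SpecializedChunk = 8K / 128 = 64 bits,
  // and since chunks are aligned to their own size those bits start at a
  // 64-bit-aligned position, so one uint64_t load suffices:
  //
  //   is_any_bit_set_in_region_3264<uint64_t>(pos, 64, layer);
  //
  // For class space, ClassMediumChunk / ClassSpecializedChunk = 4K / 128 = 32,
  // which is served by the uint32_t instantiation.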
533
534 // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
535 bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
536 if (pos % 32 == 0 && num_bits == 32) {
537 return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
538 } else if (pos % 64 == 0 && num_bits == 64) {
539 return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
540 } else {
541 for (unsigned n = 0; n < num_bits; n ++) {
542 if (get_bit_at_position(pos + n, layer)) {
543 return true;
544 }
545 }
546 }
547 return false;
548 }
549
550 // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
551 bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
552 assert(word_size % _smallest_chunk_word_size == 0,
553 "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
554 const unsigned pos = get_bitpos_for_address(p);
555 const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
556 return is_any_bit_set_in_region(pos, num_bits, layer);
557 }
558
559 // Optimized case of set_bits_of_region for 32/64bit aligned access:
560 // pos is 32/64 aligned and num_bits is 32/64.
561 // This is the typical case when coalescing to medium chunks, whose size
562   // is 32 or 64 times the specialized chunk size (depending on class or
563   // non-class case), so they occupy 32 or 64 bits, which are correspondingly
564   // aligned because chunks are chunk-size aligned.
565 template <typename T>
566 void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
567 assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
568 (unsigned)(sizeof(T) * 8), pos);
569 assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
570 num_bits, (unsigned)(sizeof(T) * 8));
571 const size_t byteoffset = pos / 8;
572 assert(byteoffset <= (_map_size - sizeof(T)),
573 "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
574 T* const pw = (T*)(_map[layer] + byteoffset);
575 *pw = v ? all_ones<T>::value : (T) 0;
576 }
577
578 // Set all bits in a region starting at pos to a value.
579 void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
580 assert(_map_size > 0, "not initialized");
581 assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
582 if (pos % 32 == 0 && num_bits == 32) {
583 set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
584 } else if (pos % 64 == 0 && num_bits == 64) {
585 set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
586 } else {
587 for (unsigned n = 0; n < num_bits; n ++) {
588 set_bit_at_position(pos + n, layer, v);
589 }
590 }
591 }
592
593 // Helper: sets all bits in a region [p, p+word_size).
594 void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
595 assert(word_size % _smallest_chunk_word_size == 0,
596 "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
597 const unsigned pos = get_bitpos_for_address(p);
598 const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
599 set_bits_of_region(pos, num_bits, layer, v);
600 }
601
602 // Helper: given an address, return the bit position representing that address.
603 unsigned get_bitpos_for_address(const MetaWord* p) const {
604 assert(_reference_address != NULL, "not initialized");
605 assert(p >= _reference_address && p < _reference_address + _word_size,
606 "Address %p out of range for occupancy map [%p..%p).",
607 p, _reference_address, _reference_address + _word_size);
608 assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
609 "Address not aligned (%p).", p);
610 const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
611 assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
612 return (unsigned) d;
613 }
614
615 public:
616
617 OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
618 _reference_address(reference_address), _word_size(word_size),
619 _smallest_chunk_word_size(smallest_chunk_word_size) {
620 assert(reference_address != NULL, "invalid reference address");
621 assert(is_aligned(reference_address, smallest_chunk_word_size),
622 "Reference address not aligned to smallest chunk size.");
623 assert(is_aligned(word_size, smallest_chunk_word_size),
624 "Word_size shall be a multiple of the smallest chunk size.");
625     // Calculate the bitmap size: one bit per smallest_chunk_word_size-sized area.
626 size_t num_bits = word_size / smallest_chunk_word_size;
627 _map_size = (num_bits + 7) / 8;
628 assert(_map_size * 8 >= num_bits, "sanity");
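    // Example sizing (editor's note, assuming a 4 MB node and a smallest
    // chunk size of 128 words, i.e. 1 KB on 64-bit VMs): num_bits =
    // 4 MB / 1 KB = 4096, so each bit layer allocated below is 512 bytes.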
629 _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
630 _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
631 assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
632 memset(_map[1], 0, _map_size);
633 memset(_map[0], 0, _map_size);
634     // Sanity check: the first and last possible chunk start addresses in
635     // the covered range shall map to the first and last bits of the bitmap.
636 assert(get_bitpos_for_address(reference_address) == 0,
637 "First chunk address in range must map to fist bit in bitmap.");
638 assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
639 "Last chunk address in range must map to last bit in bitmap.");
640 }
641
642 ~OccupancyMap() {
643 os::free(_map[0]);
644 os::free(_map[1]);
645 }
646
647 // Returns true if at address x a chunk is starting.
648 bool chunk_starts_at_address(MetaWord* p) const {
649 const unsigned pos = get_bitpos_for_address(p);
650 return get_bit_at_position(pos, layer_chunk_start_map);
651 }
652
653 void set_chunk_starts_at_address(MetaWord* p, bool v) {
654 const unsigned pos = get_bitpos_for_address(p);
655 set_bit_at_position(pos, layer_chunk_start_map, v);
656 }
657
658 // Removes all chunk-start-bits inside a region, typically as a
659   // result of coalescing chunks.
660 void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
661 set_bits_of_region(p, word_size, layer_chunk_start_map, false);
662 }
663
664   // Returns true if there are live (in use) chunks in the region limited
665 // by [p, p+word_size).
666 bool is_region_in_use(MetaWord* p, size_t word_size) const {
667 return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
668 }
669
670 // Marks the region starting at p with the size word_size as in use
671 // or free, depending on v.
672 void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
673 set_bits_of_region(p, word_size, layer_in_use_map, v);
674 }
675
676 #ifdef ASSERT
677 // Verify occupancy map for the address range [from, to).
678 // We need to tell it the address range, because the memory the
679   // occupancy map is covering may not be fully committed yet.
680 void verify(MetaWord* from, MetaWord* to) {
681 Metachunk* chunk = NULL;
682 int nth_bit_for_chunk = 0;
683 MetaWord* chunk_end = NULL;
684 for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
685 const unsigned pos = get_bitpos_for_address(p);
686 // Check the chunk-starts-info:
687 if (get_bit_at_position(pos, layer_chunk_start_map)) {
688 // Chunk start marked in bitmap.
689 chunk = (Metachunk*) p;
690 if (chunk_end != NULL) {
691 assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
692 "the next chunk to start at %p).", p, chunk_end);
693 }
694 assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
695 if (chunk->get_chunk_type() != HumongousIndex) {
696 guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
697 }
698 chunk_end = p + chunk->word_size();
699 nth_bit_for_chunk = 0;
700 assert(chunk_end <= to, "Chunk end overlaps test address range.");
701 } else {
702 // No chunk start marked in bitmap.
703 assert(chunk != NULL, "Chunk should start at start of address range.");
704 assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
705 nth_bit_for_chunk ++;
706 }
707 // Check the in-use-info:
708 const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
709 if (in_use_bit) {
710 assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
711 chunk, nth_bit_for_chunk);
712 } else {
713 assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
714 chunk, nth_bit_for_chunk);
715 }
716 }
717 }
718
719 // Verify that a given chunk is correctly accounted for in the bitmap.
720 void verify_for_chunk(Metachunk* chunk) {
721 assert(chunk_starts_at_address((MetaWord*) chunk),
722 "No chunk start marked in map for chunk %p.", chunk);
723 // For chunks larger than the minimal chunk size, no other chunk
724 // must start in its area.
725 if (chunk->word_size() > _smallest_chunk_word_size) {
726 assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
727 chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
728 "No chunk must start within another chunk.");
729 }
730 if (!chunk->is_tagged_free()) {
731 assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
732 "Chunk %p is in use but marked as free in map (%d %d).",
733 chunk, chunk->get_chunk_type(), chunk->get_origin());
734 } else {
735 assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
736 "Chunk %p is free but marked as in-use in map (%d %d).",
737 chunk, chunk->get_chunk_type(), chunk->get_origin());
738 }
739 }
740
741 #endif // ASSERT
742
743 };
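// A minimal usage sketch for the map above (editor's illustration; in the VM
// the map is owned and driven by VirtualSpaceNode, see below). All names
// prefixed my_ are placeholders:
//
//   OccupancyMap map(my_bottom, my_word_size, SpecializedChunk);
//   map.set_chunk_starts_at_address(my_chunk_base, true);        // chunk born here
//   map.set_region_in_use(my_chunk_base, my_chunk_words, true);
//   // ... later, when the chunk is returned to a freelist:
//   map.set_region_in_use(my_chunk_base, my_chunk_words, false);
//   assert(!map.is_region_in_use(my_chunk_base, my_chunk_words), "now free");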
744
745 // A VirtualSpaceList node.
746 class VirtualSpaceNode : public CHeapObj<mtClass> {
747 friend class VirtualSpaceList;
748
749 // Link to next VirtualSpaceNode
750 VirtualSpaceNode* _next;
751
752 // Whether this node is contained in class or metaspace.
753 const bool _is_class;
754
755 // total in the VirtualSpace
756 MemRegion _reserved;
757 ReservedSpace _rs;
758 VirtualSpace _virtual_space;
759 MetaWord* _top;
760 // count of chunks contained in this VirtualSpace
761 uintx _container_count;
762
763 OccupancyMap* _occupancy_map;
764
765 // Convenience functions to access the _virtual_space
766 char* low() const { return virtual_space()->low(); }
767 char* high() const { return virtual_space()->high(); }
768
769 // The first Metachunk will be allocated at the bottom of the
770 // VirtualSpace
771 Metachunk* first_chunk() { return (Metachunk*) bottom(); }
772
773 // Committed but unused space in the virtual space
774 size_t free_words_in_vs() const;
775
776 // True if this node belongs to class metaspace.
777 bool is_class() const { return _is_class; }
778
779 public:
780
781 VirtualSpaceNode(bool is_class, size_t byte_size);
782 VirtualSpaceNode(bool is_class, ReservedSpace rs) :
783 _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
784 ~VirtualSpaceNode();
785
786 // Convenience functions for logical bottom and end
787 MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
788 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
789
790 const OccupancyMap* occupancy_map() const { return _occupancy_map; }
791 OccupancyMap* occupancy_map() { return _occupancy_map; }
792
793 bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
794
795 size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; }
796 size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
797
798 bool is_pre_committed() const { return _virtual_space.special(); }
799
800 // address of next available space in _virtual_space;
801 // Accessors
802 VirtualSpaceNode* next() { return _next; }
803 void set_next(VirtualSpaceNode* v) { _next = v; }
804
805 void set_reserved(MemRegion const v) { _reserved = v; }
806 void set_top(MetaWord* v) { _top = v; }
807
808 // Accessors
809 MemRegion* reserved() { return &_reserved; }
810 VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
811
812 // Returns true if "word_size" is available in the VirtualSpace
840 bool expand_by(size_t min_words, size_t preferred_words);
841
842 // In preparation for deleting this node, remove all the chunks
843 // in the node from any freelist.
844 void purge(ChunkManager* chunk_manager);
845
846 // If an allocation doesn't fit in the current node a new node is created.
847 // Allocate chunks out of the remaining committed space in this node
848 // to avoid wasting that memory.
849 // This always adds up because all the chunk sizes are multiples of
850 // the smallest chunk size.
851 void retire(ChunkManager* chunk_manager);
852
853 #ifdef ASSERT
854 // Debug support
855 void mangle();
856 #endif
857
858 void print_on(outputStream* st) const;
859 void print_map(outputStream* st, bool is_class) const;
860
861 // Verify all chunks in this node.
862 void verify();
863
864 };
865
866 #define assert_is_aligned(value, alignment) \
867 assert(is_aligned((value), (alignment)), \
868 SIZE_FORMAT_HEX " is not aligned to " \
869 SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
870
871 // Decide if large pages should be committed when the memory is reserved.
872 static bool should_commit_large_pages_when_reserving(size_t bytes) {
873 if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
874 size_t words = bytes / BytesPerWord;
875 bool is_class = false; // We never reserve large pages for the class space.
876 if (MetaspaceGC::can_expand(words, is_class) &&
877 MetaspaceGC::allowed_expansion() >= words) {
878 return true;
879 }
880 }
881
882 return false;
883 }
884
885 // bytes is the size of the associated virtual space.
886 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
887 _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
888 assert_is_aligned(bytes, Metaspace::reserve_alignment());
889 bool large_pages = should_commit_large_pages_when_reserving(bytes);
890 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
891
892 if (_rs.is_reserved()) {
893 assert(_rs.base() != NULL, "Catch if we get a NULL address");
894 assert(_rs.size() != 0, "Catch if we get a 0 size");
895 assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
896 assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
897
898 MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
899 }
900 }
901
902 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
903 DEBUG_ONLY(this->verify();)
904 Metachunk* chunk = first_chunk();
905 Metachunk* invalid_chunk = (Metachunk*) top();
906 while (chunk < invalid_chunk ) {
907 assert(chunk->is_tagged_free(), "Should be tagged free");
908 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
909 chunk_manager->remove_chunk(chunk);
910 DEBUG_ONLY(chunk->remove_sentinel();)
911 assert(chunk->next() == NULL &&
912 chunk->prev() == NULL,
913 "Was not removed from its list");
914 chunk = (Metachunk*) next;
915 }
916 }
917
918 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
919
920 if (bottom() == top()) {
921 return;
922 }
923
924 const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
925 const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
926 const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
927
928 int line_len = 100;
929 const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
930 line_len = (int)(section_len / spec_chunk_size);
931
932 #ifdef ASSERT
933 #define NUM_LINES 4
934 #else
935 #define NUM_LINES 2
936 #endif
937
938 char* lines[NUM_LINES];
939 for (int i = 0; i < NUM_LINES; i ++) {
940 lines[i] = (char*)os::malloc(line_len, mtInternal);
941 }
942 int pos = 0;
943 const MetaWord* p = bottom();
944 const Metachunk* chunk = (const Metachunk*)p;
945 const MetaWord* chunk_end = p + chunk->word_size();
946 while (p < top()) {
947 if (pos == line_len) {
948 pos = 0;
949 for (int i = 0; i < NUM_LINES; i ++) {
950 st->fill_to(22);
951 st->print_raw(lines[i], line_len);
952 st->cr();
953 }
954 }
955 if (pos == 0) {
956 st->print(PTR_FORMAT ":", p2i(p));
957 }
958 if (p == chunk_end) {
959 chunk = (Metachunk*)p;
960 chunk_end = p + chunk->word_size();
961 }
962     // Line 1: chunk starting points (a dot if that area is a chunk start).
963 lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
964
965 // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
966 // chunk is in use.
967 const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
968 if (chunk->word_size() == spec_chunk_size) {
969 lines[1][pos] = chunk_is_free ? 'x' : 'X';
970 } else if (chunk->word_size() == small_chunk_size) {
971 lines[1][pos] = chunk_is_free ? 's' : 'S';
972 } else if (chunk->word_size() == med_chunk_size) {
973 lines[1][pos] = chunk_is_free ? 'm' : 'M';
974 } else if (chunk->word_size() > med_chunk_size) {
975 lines[1][pos] = chunk_is_free ? 'h' : 'H';
976 } else {
977 ShouldNotReachHere();
978 }
979
980 #ifdef ASSERT
981 // Line 3: chunk origin
982 const ChunkOrigin origin = chunk->get_origin();
983 lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
984
985 // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
986 // but were never used.
987 lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
988 #endif
989
990 p += spec_chunk_size;
991 pos ++;
992 }
993 if (pos > 0) {
994 for (int i = 0; i < NUM_LINES; i ++) {
995 st->fill_to(22);
996 st->print_raw(lines[i], line_len);
997 st->cr();
998 }
999 }
1000 for (int i = 0; i < NUM_LINES; i ++) {
1001 os::free(lines[i]);
1002 }
1003 }
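// Hypothetical output fragment (editor's illustration of the legend above;
// the address and chunk layout are made up). With one column per
// specialized-chunk-sized area:
//
//   0x00007f3c12000000: .   .   ..
//                       SSSSssssxX
//
// Line 1 marks chunk starts; line 2 shows an in-use small chunk, a free
// small chunk, a free specialized chunk and an in-use specialized chunk.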
1004
1005
1006 #ifdef ASSERT
1007 uintx VirtualSpaceNode::container_count_slow() {
1008 uintx count = 0;
1009 Metachunk* chunk = first_chunk();
1010 Metachunk* invalid_chunk = (Metachunk*) top();
1011 while (chunk < invalid_chunk ) {
1012 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1013 do_verify_chunk(chunk);
1014 // Don't count the chunks on the free lists. Those are
1015 // still part of the VirtualSpaceNode but not currently
1016 // counted.
1017 if (!chunk->is_tagged_free()) {
1018 count++;
1019 }
1020 chunk = (Metachunk*) next;
1021 }
1022 return count;
1023 }
1024 #endif
1025
1026 // Verify all chunks in this list node.
1027 void VirtualSpaceNode::verify() {
1028 DEBUG_ONLY(verify_container_count();)
1029 Metachunk* chunk = first_chunk();
1030 Metachunk* invalid_chunk = (Metachunk*) top();
1031 // Iterate the chunks in this node and verify each chunk.
1032   // Also verify that space is ideally coalesced, i.e. that we did not miss any coalescing chances (there shall be no
1033   // runs of neighboring free chunks which could have been merged into a larger free chunk).
1034 const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
1035 const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
1036 int num_free_chunks_since_last_med_boundary = -1;
1037 int num_free_chunks_since_last_small_boundary = -1;
1038 while (chunk < invalid_chunk ) {
1039 // verify each chunk.
1040 DEBUG_ONLY(do_verify_chunk(chunk);)
1041     // Test for missed coalescing opportunities: count the number of free chunks since the last chunk boundary.
1042 // Reset the counter when encountering a non-free chunk.
1043 if (chunk->get_chunk_type() != HumongousIndex) {
1044 if (chunk->is_tagged_free()) {
1045 if (is_aligned(chunk, size_small)) {
1046 assert(num_free_chunks_since_last_small_boundary <= 1,
1047 "Missed coalescation opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
1048 num_free_chunks_since_last_small_boundary = 0;
1049 } else if (num_free_chunks_since_last_small_boundary != -1) {
1050 num_free_chunks_since_last_small_boundary ++;
1051 }
1052 if (is_aligned(chunk, size_med)) {
1053 assert(num_free_chunks_since_last_med_boundary <= 1,
1054 "Missed coalescation opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
1055 num_free_chunks_since_last_med_boundary = 0;
1056 } else if (num_free_chunks_since_last_med_boundary != -1) {
1057 num_free_chunks_since_last_med_boundary ++;
1058 }
1059 } else {
1060 // Encountering a non-free chunk, reset counters.
1061 num_free_chunks_since_last_med_boundary = -1;
1062 num_free_chunks_since_last_small_boundary = -1;
1063 }
1064 } else {
1065 // One cannot merge areas with a humongous chunk in the middle. Reset counters.
1066 num_free_chunks_since_last_med_boundary = -1;
1067 num_free_chunks_since_last_small_boundary = -1;
1068 }
1069
1070 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
1071 chunk = (Metachunk*) next;
1072 }
1073 // Also verify the whole occupancy map
1074 DEBUG_ONLY(occupancy_map()->verify(this->bottom(), this->top());)
1075 }
1076
1077 // List of VirtualSpaces for metadata allocation.
1078 class VirtualSpaceList : public CHeapObj<mtClass> {
1079 friend class VirtualSpaceNode;
1080
1081 enum VirtualSpaceSizes {
1082 VirtualSpaceSize = 256 * K
1083 };
1084
1085 // Head of the list
1086 VirtualSpaceNode* _virtual_space_list;
1087 // virtual space currently being used for allocations
1088 VirtualSpaceNode* _current_virtual_space;
1089
1090 // Is this VirtualSpaceList used for the compressed class space
1091 bool _is_class;
1092
1093 // Sum of reserved and committed memory in the virtual spaces
1094 size_t _reserved_words;
1095 size_t _committed_words;
1096
1328 static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
1329 size_t adjust_initial_chunk_size(size_t requested) const;
1330
1331 // Get the initial chunks size for this metaspace type.
1332 size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;
1333
1334 size_t sum_capacity_in_chunks_in_use() const;
1335 size_t sum_used_in_chunks_in_use() const;
1336 size_t sum_free_in_chunks_in_use() const;
1337 size_t sum_waste_in_chunks_in_use() const;
1338   size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;
1339
1340 size_t sum_count_in_chunks_in_use();
1341 size_t sum_count_in_chunks_in_use(ChunkIndex i);
1342
1343 Metachunk* get_new_chunk(size_t chunk_word_size);
1344
1345 // Block allocation and deallocation.
1346 // Allocates a block from the current chunk
1347 MetaWord* allocate(size_t word_size);
1348
1349 // Helper for allocations
1350 MetaWord* allocate_work(size_t word_size);
1351
1352 // Returns a block to the per manager freelist
1353 void deallocate(MetaWord* p, size_t word_size);
1354
1355 // Based on the allocation size and a minimum chunk size,
1356   // returns the chunk size to use (for expanding space for chunk allocation).
1357 size_t calc_chunk_size(size_t allocation_word_size);
1358
1359 // Called when an allocation from the current chunk fails.
1360 // Gets a new chunk (may require getting a new virtual space),
1361 // and allocates from that chunk.
1362 MetaWord* grow_and_allocate(size_t word_size);
1363
1364   // Report memory usage to MemoryService.
1365 void track_metaspace_memory_usage();
1366
1367 // debugging support.
1482 if (unused >= SmallBlocks::small_block_min_size()) {
1483 return_block(new_block + word_size, unused);
1484 }
1485
1486 log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
1487 p2i(new_block), word_size);
1488 return new_block;
1489 }
1490
1491 void BlockFreelist::print_on(outputStream* st) const {
1492 dictionary()->print_free_lists(st);
1493 if (_small_blocks != NULL) {
1494 _small_blocks->print_on(st);
1495 }
1496 }
1497
1498 // VirtualSpaceNode methods
1499
1500 VirtualSpaceNode::~VirtualSpaceNode() {
1501 _rs.release();
1502 if (_occupancy_map != NULL) {
1503 delete _occupancy_map;
1504 }
1505 #ifdef ASSERT
1506 size_t word_size = sizeof(*this) / BytesPerWord;
1507 Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
1508 #endif
1509 }
1510
1511 size_t VirtualSpaceNode::used_words_in_vs() const {
1512 return pointer_delta(top(), bottom(), sizeof(MetaWord));
1513 }
1514
1515 // Space committed in the VirtualSpace
1516 size_t VirtualSpaceNode::capacity_words_in_vs() const {
1517 return pointer_delta(end(), bottom(), sizeof(MetaWord));
1518 }
1519
1520 size_t VirtualSpaceNode::free_words_in_vs() const {
1521 return pointer_delta(end(), top(), sizeof(MetaWord));
1522 }
1523
1524 // Allocates the chunk from the virtual space only.
1525 // This interface is also used internally for debugging. Not all
1526 // chunks removed here are necessarily used for allocation.
1527 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1528 // Non-humongous chunks are to be allocated aligned to their chunk
1529 // size. So, start addresses of medium chunks are aligned to medium
1530 // chunk size, those of small chunks to small chunk size and so
1531   // forth. This facilitates free chunk coalescing and reduces
1532 // fragmentation. Chunk sizes are spec < small < medium, with each
1533 // larger chunk size being a multiple of the next smaller chunk
1534 // size.
1535   // Because of this alignment, we may need to create a number of padding
1536 // chunks. These chunks are created and added to the freelist.
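  // Example (editor's sketch, non-class sizes): if top() currently sits at
  // word offset 0x300 (aligned to SpecializedChunk = 0x80 words) and a small
  // chunk (0x200 words) is requested, the next small-aligned offset is 0x400.
  // The gap [0x300, 0x400) is then filled with two specialized padding chunks
  // before the small chunk is placed at 0x400.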
1537
1538 // The chunk manager to which we will give our padding chunks.
1539 ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
1540
1541 // shorthands
1542 const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
1543 const size_t small_word_size = chunk_manager->small_chunk_word_size();
1544 const size_t med_word_size = chunk_manager->medium_chunk_word_size();
1545
1546 assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
1547 chunk_word_size >= med_word_size, "Invalid chunk size requested.");
1548
1549 // Chunk alignment (in bytes) == chunk size unless humongous.
1550 // Humongous chunks are aligned to the smallest chunk size (spec).
1551 const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
1552 spec_word_size : chunk_word_size) * sizeof(MetaWord);
1553
1554 // Do we have enough space to create the requested chunk plus
1555 // any padding chunks needed?
1556 MetaWord* const next_aligned =
1557 static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
1558 if (!is_available((next_aligned - top()) + chunk_word_size)) {
1559 return NULL;
1560 }
1561
1562 // Before allocating the requested chunk, allocate padding chunks if necessary.
1563 // We only need to do this for small or medium chunks: specialized chunks are the
1564   // smallest size, hence always aligned. Humongous chunks are allocated unaligned
1565 // (implicitly, also aligned to smallest chunk size).
1566 if (chunk_word_size == med_word_size || chunk_word_size == small_word_size) {
1567
1568 if (next_aligned > top()) {
1569 log_trace(gc, metaspace, freelist)("Coalescing (%s): creating padding chunks between %p and %p...",
1570 (is_class() ? "class space" : "metaspace"),
1571 top(), next_aligned);
1572 }
1573
1574 // Allocate padding chunks.
1575 while (next_aligned > top()) {
1576 size_t padding_chunk_word_size = small_word_size;
1577 if (!is_aligned(top(), small_word_size * sizeof(MetaWord))) {
1578 assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
1579 padding_chunk_word_size = spec_word_size;
1580 }
1581 MetaWord* here = top();
1582 assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
1583 inc_top(padding_chunk_word_size);
1584
1585 // Create new padding chunk.
1586 ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
1587 assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
1588
1589 Metachunk* const padding_chunk =
1590 ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
1591 assert(padding_chunk == (Metachunk*)here, "Sanity");
1592 DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
1593 log_trace(gc, metaspace, freelist)("Coalescing (%s): created padding chunk at "
1594 PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
1595 (is_class() ? "class space" : "metaspace"),
1596 p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
1597
1598 // Mark chunk start in occupancy map.
1599 occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
1600
1601 // Chunks are born as in-use (see Metachunk ctor). So, before returning
1602 // the padding chunk to its chunk manager, mark it as in use (ChunkManager
1603 // will assert that).
1604 do_update_in_use_info_for_chunk(padding_chunk, true);
1605
1606 // Return Chunk to freelist.
1607 inc_container_count();
1608 chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
1609 // Please note: ChunkManager::return_single_chunk() may have merged the
1610 // padding chunk with neighboring free chunks, in which case it no longer
1611 // exists as a separate chunk. Do not reference the padding chunk beyond
1612 // this point.
1613 }
1614
1615 } // End: create padding chunks if necessary.
1616
1617 // Now, top should be aligned correctly.
1618 assert_is_aligned(top(), required_chunk_alignment);
1619
1620 // Bottom of the new chunk
1621 MetaWord* chunk_limit = top();
1622 assert(chunk_limit != NULL, "Not safe to call this method");
1623
1624 // The virtual spaces are always expanded by the
1625 // commit granularity to enforce the following condition.
1626 // Without this the is_available check will not work correctly.
1627 assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1628 "The committed memory doesn't match the expanded memory.");
1629
1630 if (!is_available(chunk_word_size)) {
1631 LogTarget(Debug, gc, metaspace, freelist) lt;
1632 if (lt.is_enabled()) {
1633 LogStream ls(lt);
1634 ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1635 // Dump some information about the virtual space that is nearly full
1636 print_on(&ls);
1637 }
1638 return NULL;
1639 }
1640
1641 // Take the space (bump top on the current virtual space).
1642 inc_top(chunk_word_size);
1643
1644 // Initialize the chunk
1645 ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
1646 Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
1647 assert(result == (Metachunk*)chunk_limit, "Sanity");
1648 occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
1649 do_update_in_use_info_for_chunk(result, true);
1650
1651 inc_container_count();
1652
1653 DEBUG_ONLY(chunk_manager->locked_verify());
1654 DEBUG_ONLY(this->verify());
1655 DEBUG_ONLY(do_verify_chunk(result));
1656
1657 DEBUG_ONLY(result->inc_use_count();)
1658
1659 return result;
1660 }
1661
1662
1663 // Expand the virtual space (commit more of the reserved space)
1664 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1665 size_t min_bytes = min_words * BytesPerWord;
1666 size_t preferred_bytes = preferred_words * BytesPerWord;
1667
1668 size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1669
1670 if (uncommitted < min_bytes) {
1671 return false;
1672 }
1673
1674 size_t commit = MIN2(preferred_bytes, uncommitted);
1675 bool result = virtual_space()->expand_by(commit, false);
1676
1677 if (result) {
1678 log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
1679 (is_class() ? "class" : "non-class"), commit);
1680 } else {
1681 log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
1682 (is_class() ? "class" : "non-class"), commit);
1683 }
1684
1685 assert(result, "Failed to commit memory");
1686
1687 return result;
1688 }
1689
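// A minimal usage sketch (ours): callers pass a hard minimum and a soft
// preferred commit size. The node fails only if even min_words cannot be
// committed; otherwise it commits as much of preferred_words as the
// remaining reservation allows. With illustrative numbers:
//
//   // reserved = 1M words, all but 10K words already committed:
//   node->expand_by(4 * K, 64 * K);   // commits MIN2(64K, 10K) = 10K words
//   node->expand_by(16 * K, 64 * K);  // returns false: uncommitted < min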
1690 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
1691 assert_lock_strong(SpaceManager::expand_lock());
1692 Metachunk* result = take_from_committed(chunk_word_size);
1693 return result;
1694 }
1695
1696 bool VirtualSpaceNode::initialize() {
1697
1698 if (!_rs.is_reserved()) {
1699 return false;
1700 }
1701
1702 // These are necessary restrictions to make sure that the virtual space always
1703 // grows in steps of Metaspace::commit_alignment(). If both base and size are
1704 // aligned only the middle alignment of the VirtualSpace is used.
1705 assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
1706 assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
1707
1708 // ReservedSpaces marked as special will have the entire memory
1709 // pre-committed. Setting a committed size will make sure that
1710 // committed_size and actual_committed_size agree.
1711 size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
1712
1713 bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
1714 Metaspace::commit_alignment());
1715 if (result) {
1716 assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
1717 "Checking that the pre-committed memory was registered by the VirtualSpace");
1718
1719 set_top((MetaWord*)virtual_space()->low());
1720 set_reserved(MemRegion((HeapWord*)_rs.base(),
1721 (HeapWord*)(_rs.base() + _rs.size())));
1722
1723 assert(reserved()->start() == (HeapWord*) _rs.base(),
1724 "Reserved start was not set properly " PTR_FORMAT
1725 " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
1726 assert(reserved()->word_size() == _rs.size() / BytesPerWord,
1727 "Reserved size was not set properly " SIZE_FORMAT
1728 " != " SIZE_FORMAT, reserved()->word_size(),
1729 _rs.size() / BytesPerWord);
1730 }
1731
1732 // Initialize Occupancy Map.
1733 const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
1734 _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
1735
1736 return result;
1737 }
1738
1739 void VirtualSpaceNode::print_on(outputStream* st) const {
1740 size_t used = used_words_in_vs();
1741 size_t capacity = capacity_words_in_vs();
1742 VirtualSpace* vs = virtual_space();
1743 st->print_cr(" space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
1744 "[" PTR_FORMAT ", " PTR_FORMAT ", "
1745 PTR_FORMAT ", " PTR_FORMAT ")",
1746 p2i(vs), capacity / K,
1747 capacity == 0 ? 0 : used * 100 / capacity,
1748 p2i(bottom()), p2i(top()), p2i(end()),
1749 p2i(vs->high_boundary()));
1750 }
1751
1752 #ifdef ASSERT
1753 void VirtualSpaceNode::mangle() {
1754 size_t word_size = capacity_words_in_vs();
1755 Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1800 _virtual_space_count++;
1801 }
1802 void VirtualSpaceList::dec_virtual_space_count() {
1803 assert_lock_strong(SpaceManager::expand_lock());
1804 _virtual_space_count--;
1805 }
1806
1807 void ChunkManager::remove_chunk(Metachunk* chunk) {
1808 size_t word_size = chunk->word_size();
1809 ChunkIndex index = list_index(word_size);
1810 if (index != HumongousIndex) {
1811 free_chunks(index)->remove_chunk(chunk);
1812 } else {
1813 humongous_dictionary()->remove_chunk(chunk);
1814 }
1815
1816 // Chunk has been removed from the chunks free list, update counters.
1817 account_for_removed_chunk(chunk);
1818 }
1819
1820 bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
1821 assert_lock_strong(SpaceManager::expand_lock());
1822 assert(chunk != NULL, "invalid chunk pointer");
1823 // Check for valid coalescing combinations.
1824 assert((chunk->get_chunk_type() == SpecializedIndex &&
1825 (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
1826 (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
1827 "Invalid chunk coalescing combination.");
1828
1829 const size_t target_chunk_word_size =
1830 get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
1831
1832 MetaWord* const p_coalescation_start =
1833 (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
1834 MetaWord* const p_coalescation_end =
1835 p_coalescation_start + target_chunk_word_size;
1836
1837 // We need the VirtualSpaceNode containing this chunk and its occupancy map.
1838 VirtualSpaceNode* const vsn = chunk->container();
1839 OccupancyMap* const ocmap = vsn->occupancy_map();
1840
1841 // The potential coalescing range must be completely contained in the
1842 // committed range of the virtual space node.
1843 if (p_coalescation_start < vsn->bottom() || p_coalescation_end > vsn->top()) {
1844 return false;
1845 }
1846
1847 // Only attempt to coalesce if at the start of the potential
1848 // coalescing range a chunk starts and at the end of the potential
1849 // coalescing range a chunk ends. If that is not the case - i.e. if
1850 // a chunk straddles either start or end of the coalescing range -
1851 // we cannot coalesce. Note that this should only happen with
1852 // humongous chunks.
1853 if (!ocmap->chunk_starts_at_address(p_coalescation_start)) {
1854 return false;
1855 }
1856
1857 // (A chunk ends at the coalescing range end either if this is the
1858 // end of the used area or if a new chunk starts right away.)
1859 if (p_coalescation_end < vsn->top()) {
1860 if (!ocmap->chunk_starts_at_address(p_coalescation_end)) {
1861 return false;
1862 }
1863 }
1864
1865 // Now check whether there are still live chunks in the coalescing area.
1866 if (ocmap->is_region_in_use(p_coalescation_start, target_chunk_word_size)) {
1867 return false;
1868 }
1869
1870 // Success! Remove all chunks in this region...
1871 log_trace(gc, metaspace, freelist)("Coalescing (%s): coalescing chunks in area [%p-%p)...",
1872 (is_class() ? "class space" : "metaspace"),
1873 p_coalescation_start, p_coalescation_end);
1874
1875 const int num_chunks_removed =
1876 remove_chunks_in_area(p_coalescation_start, target_chunk_word_size);
1877
1878 // ... and create a single new bigger chunk.
1879 Metachunk* const p_new_chunk =
1880 ::new (p_coalescation_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
1881 assert(p_new_chunk == (Metachunk*)p_coalescation_start, "Sanity");
1882 DEBUG_ONLY(p_new_chunk->set_origin(origin_coalescation);)
1883
1884 log_trace(gc, metaspace, freelist)("Coalescing (%s): created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
1885 (is_class() ? "class space" : "metaspace"),
1886 p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
1887
1888 // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
1889 ocmap->wipe_chunk_start_bits_in_region(p_coalescation_start, target_chunk_word_size);
1890 ocmap->set_chunk_starts_at_address(p_coalescation_start, true);
1891
1892 // Mark chunk as free. Note: it is not necessary to update the in-use
1893 // bits of the occupancy map, because the old chunks were also free, so
1894 // nothing should have changed.
1895 p_new_chunk->set_is_tagged_free(true);
1896
1897 // Add new chunk to its freelist.
1898 ChunkList* const list = free_chunks(target_chunk_type);
1899 list->return_chunk_at_head(p_new_chunk);
1900
1901 // Adjust ChunkManager::_free_chunks_count (_free_chunks_total should
1902 // not have changed, because the total size of the free space is the same).
1903 _free_chunks_count -= num_chunks_removed;
1904 _free_chunks_count++;
1905
1906 // VirtualSpaceNode::container_count does not have to be modified:
1907 // it means "number of active (non-free) chunks", so coalescing of
1908 // free chunks should not affect that count.
1909
1910 // At the end of a coalescing operation, run verification tests.
1911 DEBUG_ONLY(this->locked_verify());
1912 DEBUG_ONLY(vsn->verify());
1913
1914 return true;
1915 }
1916
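// Worked example (ours, for illustration): with small = 512 and medium =
// 8K words (non-class space), returning a small chunk at word offset 0x2600
// from the node bottom yields the candidate medium range [0x2000, 0x4000)
// ("start"/"end" below are shorthands for p_coalescation_start/_end). The
// checks above amount to:
//
//   ocmap->chunk_starts_at_address(start)                     // no straddler at start
//   end == vsn->top() || ocmap->chunk_starts_at_address(end)  // none at end
//   !ocmap->is_region_in_use(start, target_chunk_word_size)   // all chunks free
//
// Only if all three hold are the free chunks in [start, end) fused into a
// single medium chunk.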
1917 // Remove all chunks in the given area - the chunks are supposed to be free -
1918 // from their corresponding freelists and mark them as invalid.
1919 // - This does not correct the occupancy map.
1920 // - This does not adjust the counters in ChunkManager.
1921 // - This does not adjust the container count in the containing VirtualSpaceNode.
1922 // Returns the number of chunks removed.
1923 int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
1924 assert(p != NULL && word_size > 0, "Invalid range.");
1925 const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
1926 assert_is_aligned(word_size, smallest_chunk_size);
1927
1928 Metachunk* const start = (Metachunk*) p;
1929 const Metachunk* const end = (Metachunk*)(p + word_size);
1930 Metachunk* cur = start;
1931 int num_removed = 0;
1932 while (cur < end) {
1933 Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
1934 DEBUG_ONLY(do_verify_chunk(cur));
1935 assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
1936 assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
1937 log_trace(gc, metaspace, freelist)("Coalescing (%s): removing chunk %p, size " SIZE_FORMAT_HEX ".",
1938 (is_class() ? "class space" : "metaspace"),
1939 cur, cur->word_size() * sizeof(MetaWord));
1940 DEBUG_ONLY(cur->remove_sentinel();)
1941 // Note: cannot call ChunkManager::remove_chunk, because that
1942 // modifies the counters in ChunkManager, which we do not want. So
1943 // we call remove_chunk on the freelist directly (see also the
1944 // splitting function which does the same).
1945 ChunkList* const list = free_chunks(list_index(cur->word_size()));
1946 list->remove_chunk(cur);
1947 num_removed++;
1948 cur = next;
1949 }
1950 return num_removed;
1951 }
1952
1953 // Walk the list of VirtualSpaceNodes and delete
1954 // nodes with a 0 container_count. Remove Metachunks in
1955 // the node from their respective freelists.
1956 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1957 assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1958 assert_lock_strong(SpaceManager::expand_lock());
1959 // Don't use a VirtualSpaceListIterator because this
1960 // list is being changed and a straightforward use of an iterator is not safe.
1961 VirtualSpaceNode* purged_vsl = NULL;
1962 VirtualSpaceNode* prev_vsl = virtual_space_list();
1963 VirtualSpaceNode* next_vsl = prev_vsl;
1964 while (next_vsl != NULL) {
1965 VirtualSpaceNode* vsl = next_vsl;
1966 DEBUG_ONLY(vsl->verify_container_count();)
1967 next_vsl = vsl->next();
1968 // Don't free the current virtual space since it will likely
1969 // be needed soon.
1970 if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1971 log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
1972 ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
1973 // Unlink it from the list
1974 if (prev_vsl == vsl) {
1975 // This is the case of the current node being the first node.
1976 assert(vsl == virtual_space_list(), "Expected to be the first node");
1977 set_virtual_space_list(vsl->next());
1978 } else {
1979 prev_vsl->set_next(vsl->next());
1980 }
1981
1982 vsl->purge(chunk_manager);
1983 dec_reserved_words(vsl->reserved_words());
1984 dec_committed_words(vsl->committed_words());
1985 dec_virtual_space_count();
1986 purged_vsl = vsl;
1987 delete vsl;
1988 } else {
1989 prev_vsl = vsl;
1990 }
1991 }
1992 #ifdef ASSERT
2014 if (vsn->contains(ptr)) {
2015 return true;
2016 }
2017 }
2018 return false;
2019 }
2020
2021 void VirtualSpaceList::retire_current_virtual_space() {
2022 assert_lock_strong(SpaceManager::expand_lock());
2023
2024 VirtualSpaceNode* vsn = current_virtual_space();
2025
2026 ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
2027 Metaspace::chunk_manager_metadata();
2028
2029 vsn->retire(cm);
2030 }
2031
2032 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
2033 DEBUG_ONLY(verify_container_count();)
2034 assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
2035 for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
2036 ChunkIndex index = (ChunkIndex)i;
2037 size_t chunk_size = chunk_manager->size_by_index(index);
2038
2039 while (free_words_in_vs() >= chunk_size) {
2040 Metachunk* chunk = get_chunk_vs(chunk_size);
2041 // Chunk will be allocated aligned, so allocation may require
2042 // additional padding chunks. That may cause the allocation above to
2043 // fail. Just ignore the failed allocation and continue with the
2044 // next smaller chunk size. As the VirtualSpaceNode committed
2045 // size should be a multiple of the smallest chunk size, we
2046 // should always be able to fill the VirtualSpace completely.
2047 if (chunk == NULL) {
2048 break;
2049 }
2050 chunk_manager->return_single_chunk(index, chunk);
2051 }
2052 DEBUG_ONLY(verify_container_count();)
2053 }
2054 assert(free_words_in_vs() == 0, "should be empty now");
2055 }
2056
2057 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
2058 _is_class(false),
2059 _virtual_space_list(NULL),
2060 _current_virtual_space(NULL),
2061 _reserved_words(0),
2062 _committed_words(0),
2063 _virtual_space_count(0) {
2064 MutexLockerEx cl(SpaceManager::expand_lock(),
2065 Mutex::_no_safepoint_check_flag);
2066 create_new_virtual_space(word_size);
2067 }
2068
2069 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
2070 _is_class(true),
2071 _virtual_space_list(NULL),
2072 _current_virtual_space(NULL),
2073 _reserved_words(0),
2074 _committed_words(0),
2075 _virtual_space_count(0) {
2076 MutexLockerEx cl(SpaceManager::expand_lock(),
2077 Mutex::_no_safepoint_check_flag);
2078 VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
2079 bool succeeded = class_entry->initialize();
2080 if (succeeded) {
2081 link_vs(class_entry);
2082 }
2083 }
2084
2085 size_t VirtualSpaceList::free_bytes() {
2086 return current_virtual_space()->free_words_in_vs() * BytesPerWord;
2087 }
2088
2089 // Allocate another meta virtual space and add it to the list.
2090 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
2091 assert_lock_strong(SpaceManager::expand_lock());
2092
2093 if (is_class()) {
2094 assert(false, "We currently don't support more than one VirtualSpace for"
2095 " the compressed class space. The initialization of the"
2096 " CCS uses another code path and should not hit this path.");
2097 return false;
2098 }
2099
2100 if (vs_word_size == 0) {
2101 assert(false, "vs_word_size should always be at least _reserve_alignment large.");
2102 return false;
2103 }
2104
2105 // Reserve the space
2106 size_t vs_byte_size = vs_word_size * BytesPerWord;
2107 assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
2108
2109 // Allocate the meta virtual space and initialize it.
2110 VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
2111 if (!new_entry->initialize()) {
2112 delete new_entry;
2113 return false;
2114 } else {
2115 assert(new_entry->reserved_words() == vs_word_size,
2116 "Reserved memory size differs from requested memory size");
2117 // ensure lock-free iteration sees fully initialized node
2118 OrderAccess::storestore();
2119 link_vs(new_entry);
2120 return true;
2121 }
2122 }
2123
2124 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
2125 if (virtual_space_list() == NULL) {
2126 set_virtual_space_list(new_entry);
2127 } else {
2128 current_virtual_space()->set_next(new_entry);
2129 }
2130 set_current_virtual_space(new_entry);
2147 size_t min_words,
2148 size_t preferred_words) {
2149 size_t before = node->committed_words();
2150
2151 bool result = node->expand_by(min_words, preferred_words);
2152
2153 size_t after = node->committed_words();
2154
2155 // after and before can be the same if the memory was pre-committed.
2156 assert(after >= before, "Inconsistency");
2157 inc_committed_words(after - before);
2158
2159 return result;
2160 }
2161
2162 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
2163 assert_is_aligned(min_words, Metaspace::commit_alignment_words());
2164 assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
2165 assert(min_words <= preferred_words, "Invalid arguments");
2166
2167 const char* const class_or_not = (is_class() ? "class" : "non-class");
2168
2169 if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
2170 log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
2171 class_or_not);
2172 return false;
2173 }
2174
2175 size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
2176 if (allowed_expansion_words < min_words) {
2177 log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
2178 class_or_not);
2179 return false;
2180 }
2181
2182 size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
2183
2184 // Commit more memory from the current virtual space.
2185 bool vs_expanded = expand_node_by(current_virtual_space(),
2186 min_words,
2187 max_expansion_words);
2188 if (vs_expanded) {
2189 log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
2190 class_or_not);
2191 return true;
2192 }
2193 log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
2194 class_or_not);
2195 retire_current_virtual_space();
2196
2197 // Get another virtual space.
2198 size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
2199 grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
2200
2201 if (create_new_virtual_space(grow_vs_words)) {
2202 if (current_virtual_space()->is_pre_committed()) {
2203 // The memory was pre-committed, so we are done here.
2204 assert(min_words <= current_virtual_space()->committed_words(),
2205 "The new VirtualSpace was pre-committed, so it "
2206 "should be large enough to fit the alloc request.");
2207 return true;
2208 }
2209
2210 return expand_node_by(current_virtual_space(),
2211 min_words,
2212 max_expansion_words);
2213 }
2214
2215 return false;
2216 }
2217
2218 // Given a chunk, calculate the largest possible padding space which
2219 // could be required when allocating it.
2220 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
2221 const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
2222 if (chunk_type != HumongousIndex) {
2223 // Normal, non-humongous chunks are allocated at chunk size
2224 // boundaries, so the largest padding space required would be that
2225 // minus the smallest chunk size.
2226 const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
2227 return chunk_word_size - smallest_chunk_size;
2228 } else {
2229 // Humongous chunks are allocated at smallest-chunksize
2230 // boundaries, so there is no padding required.
2231 return 0;
2232 }
2233 }
2234
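// Worked example (ours): for a non-class medium chunk the worst case is
// top() sitting one specialized chunk past a medium boundary, so:
//
//   largest_possible_padding_size_for_chunk(MediumChunk, false)
//     == MediumChunk - SpecializedChunk   // 8192 - 128 = 8064 words
//   largest_possible_padding_size_for_chunk(10 * MediumChunk, false)
//     == 0                                // humongous: no padding needed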
2235
2236 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
2237
2238 // Allocate a chunk out of the current virtual space.
2239 Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2240
2241 if (next != NULL) {
2242 return next;
2243 }
2244
2245 // The expand amount is currently only determined by the requested sizes
2246 // and not how much committed memory is left in the current virtual space.
2247
2248 // We must have enough space for the requested size and any
2249 // additional required padding chunks.
2250 const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
2251
2252 size_t min_word_size = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
2253 size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
2254 if (min_word_size >= preferred_word_size) {
2255 // Can happen when humongous chunks are allocated.
2256 preferred_word_size = min_word_size;
2257 }
2258
2259 bool expanded = expand_by(min_word_size, preferred_word_size);
2260 if (expanded) {
2261 next = current_virtual_space()->get_chunk_vs(chunk_word_size);
2262 assert(next != NULL, "The allocation was expected to succeed after the expansion");
2263 }
2264
2265 return next;
2266 }
2267
2268 void VirtualSpaceList::print_on(outputStream* st) const {
2269 VirtualSpaceListIterator iter(virtual_space_list());
2270 while (iter.repeat()) {
2271 VirtualSpaceNode* node = iter.get_next();
2272 node->print_on(st);
2372
2373 return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
2374 }
2375
2376 void MetaspaceGC::initialize() {
2377 // Set the high-water mark to MaxMetaspaceSize during VM initialization since
2378 // we can't do a GC during initialization.
2379 _capacity_until_GC = MaxMetaspaceSize;
2380 }
2381
2382 void MetaspaceGC::post_initialize() {
2383 // Reset the high-water mark once the VM initialization is done.
2384 _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
2385 }
2386
2387 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
2388 // Check if the compressed class space is full.
2389 if (is_class && Metaspace::using_class_space()) {
2390 size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
2391 if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
2392 log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
2393 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
2394 return false;
2395 }
2396 }
2397
2398 // Check if the user has imposed a limit on the metaspace memory.
2399 size_t committed_bytes = MetaspaceAux::committed_bytes();
2400 if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
2401 log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
2402 (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
2403 return false;
2404 }
2405
2406 return true;
2407 }
2408
2409 size_t MetaspaceGC::allowed_expansion() {
2410 size_t committed_bytes = MetaspaceAux::committed_bytes();
2411 size_t capacity_until_gc = capacity_until_GC();
2412
2413 assert(capacity_until_gc >= committed_bytes,
2414 "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
2415 capacity_until_gc, committed_bytes);
2416
2417 size_t left_until_max = MaxMetaspaceSize - committed_bytes;
2418 size_t left_until_GC = capacity_until_gc - committed_bytes;
2419 size_t left_to_commit = MIN2(left_until_GC, left_until_max);
2420 log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
2421 " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".",
2422 left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
2423
2424 return left_to_commit / BytesPerWord;
2425 }
2426
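// Worked example (ours): with MaxMetaspaceSize = 256M, committed_bytes =
// 200M and capacity_until_GC = 220M, expansion is capped by the GC
// threshold rather than the hard limit:
//
//   left_until_max = 256M - 200M = 56M
//   left_until_GC  = 220M - 200M = 20M
//   allowed_expansion() == MIN2(20M, 56M) / BytesPerWord   // 20M, in words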
2427 void MetaspaceGC::compute_new_size() {
2428 assert(_shrink_factor <= 100, "invalid shrink factor");
2429 uint current_shrink_factor = _shrink_factor;
2430 _shrink_factor = 0;
2431
2432 // Using committed_bytes() for used_after_gc is an overestimation, since the
2433 // chunk free lists are included in committed_bytes() and the memory in an
2434 // un-fragmented chunk free list is available for future allocations.
2435 // However, if the chunk free lists become fragmented, then the memory may
2436 // not be available for future allocations and the memory is therefore "in use".
2437 // Including the chunk free lists in the definition of "in use" is therefore
2438 // necessary. Not including the chunk free lists can cause capacity_until_GC to
2439 // shrink below committed_bytes() and this has caused serious bugs in the past.
2440 const size_t used_after_gc = MetaspaceAux::committed_bytes();
2441 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
2442
2651 sum_free_chunks_count());
2652 }
2653
2654 void ChunkManager::verify_free_chunks_count() {
2655 #ifdef ASSERT
2656 MutexLockerEx cl(SpaceManager::expand_lock(),
2657 Mutex::_no_safepoint_check_flag);
2658 locked_verify_free_chunks_count();
2659 #endif
2660 }
2661
2662 void ChunkManager::verify() {
2663 MutexLockerEx cl(SpaceManager::expand_lock(),
2664 Mutex::_no_safepoint_check_flag);
2665 locked_verify();
2666 }
2667
2668 void ChunkManager::locked_verify() {
2669 locked_verify_free_chunks_count();
2670 locked_verify_free_chunks_total();
2671 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
2672 ChunkList* list = free_chunks(i);
2673 if (list != NULL) {
2674 Metachunk* chunk = list->head();
2675 while (chunk) {
2676 DEBUG_ONLY(do_verify_chunk(chunk);)
2677 assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
2678 chunk = chunk->next();
2679 }
2680 }
2681 }
2682 }
2683
2684 void ChunkManager::locked_print_free_chunks(outputStream* st) {
2685 assert_lock_strong(SpaceManager::expand_lock());
2686 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
2687 _free_chunks_total, _free_chunks_count);
2688 }
2689
2690 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
2691 assert_lock_strong(SpaceManager::expand_lock());
2692 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
2693 sum_free_chunks(), sum_free_chunks_count());
2694 }
2695
2696 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
2697 assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
2698 "Bad index: %d", (int)index);
2699
2700 return &_free_chunks[index];
2701 }
2733 }
2734
2735 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
2736 ChunkIndex index = list_index(word_size);
2737 assert(index < HumongousIndex, "No humongous list");
2738 return free_chunks(index);
2739 }
2740
2741 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
2742 assert_lock_strong(SpaceManager::expand_lock());
2743
2744 slow_locked_verify();
2745
2746 Metachunk* chunk = NULL;
2747 if (list_index(word_size) != HumongousIndex) {
2748 ChunkList* free_list = find_free_chunks_list(word_size);
2749 assert(free_list != NULL, "Sanity check");
2750
2751 chunk = free_list->head();
2752
2753 // Splitting: split large chunks into smaller chunks if there
2754 // are no smaller chunks, just large chunks. This is the
2755 // counterpart of the coalescing-upon-chunk-return.
2756 if (chunk == NULL) {
2757
2758 ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
2759
2760 // Is there a larger chunk we could split?
2761 Metachunk* larger_chunk = NULL;
2762 ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
2763 while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
2764 larger_chunk = free_chunks(larger_chunk_index)->head();
2765 if (larger_chunk == NULL) {
2766 larger_chunk_index = next_chunk_index(larger_chunk_index);
2767 }
2768 }
2769
2770 if (larger_chunk != NULL) {
2771 assert(larger_chunk->word_size() > word_size, "Sanity");
2772 assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
2773
2774 // We found a larger chunk. Let's split it up:
2775 // - remove old chunk
2776 // - in its place, create new smaller chunks, with at least one chunk
2777 // being of target size, the others sized as large as possible. This
2778 // is to make sure the resulting chunks are "as coalesced as possible"
2779 // (similar to VirtualSpaceNode::retire()).
2780 // Note: during this operation both ChunkManager and VirtualSpaceNode
2781 // are temporarily invalid, so be careful with asserts.
2782
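// Worked example (ours, for illustration): splitting a non-class medium
// chunk (8192 words) to satisfy a specialized request (128 words) lays the
// region out, in address order, as
//
//   [ spec | spec | spec | spec | small x 15 ]   // 4*128 + 15*512 == 8192
//
// The first specialized chunk is handed out; the three remaining
// specialized chunks and the fifteen small chunks are returned to their
// freelists. Each remainder chunk is the largest type whose alignment
// holds at its address, which keeps the remainder "as coalesced as
// possible".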
2783 log_trace(gc, metaspace, freelist)("Coalescing (%s): splitting chunk " PTR_FORMAT
2784 ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
2785 (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
2786 chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
2787
2788 MetaWord* const region_start = (MetaWord*)larger_chunk;
2789 const size_t region_word_len = larger_chunk->word_size();
2790 MetaWord* const region_end = region_start + region_word_len;
2791 VirtualSpaceNode* const vsn = larger_chunk->container();
2792 OccupancyMap* const ocmap = vsn->occupancy_map();
2793
2794 // Remove old chunk.
2795 free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
2796 DEBUG_ONLY(larger_chunk->remove_sentinel();)
2797
2798 DEBUG_ONLY(larger_chunk = NULL); // Prevent access from here on and wipe area.
2799 DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
2800
2801 // In its place create first the target chunk...
2802 MetaWord* p = region_start;
2803 chunk = ::new (p) Metachunk(target_chunk_index, is_class(), word_size, vsn);
2804 assert(chunk == (Metachunk*)p, "Sanity");
2805 DEBUG_ONLY(chunk->set_origin(origin_split);)
2806
2807 // Note: we do not need to mark its start in the occupancy map
2808 // because it coincides with the old chunk start.
2809
2810 // We are about to return it, so mark it in use and update vsn count.
2811 do_update_in_use_info_for_chunk(chunk, true);
2812 account_for_removed_chunk(chunk);
2813 vsn->inc_container_count();
2814
2815 // This chunk should now be valid and can be verified.
2816 DEBUG_ONLY(do_verify_chunk(chunk));
2817
2818 // In the remaining space create the remainder chunks.
2819 p += chunk->word_size();
2820 assert(p < region_end, "Sanity");
2821
2822 while (p < region_end) {
2823
2824 // Find the largest chunk size which fits the alignment requirements at address p.
2825 ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
2826 size_t this_chunk_word_size = 0;
2827 for (;;) {
2828 this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
2829 if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
2830 break;
2831 } else {
2832 this_chunk_index = prev_chunk_index(this_chunk_index);
2833 assert(this_chunk_index >= target_chunk_index, "Sanity");
2834 }
2835 }
2836
2837 assert(this_chunk_word_size >= word_size, "Sanity");
2838 assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
2839 assert(p + this_chunk_word_size <= region_end, "Sanity");
2840
2841 // Create splitting chunk.
2842 Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
2843 assert(this_chunk == (Metachunk*)p, "Sanity");
2844 DEBUG_ONLY(this_chunk->set_origin(origin_split);)
2845 ocmap->set_chunk_starts_at_address(p, true);
2846 do_update_in_use_info_for_chunk(this_chunk, false);
2847
2848 // This chunk should be valid and can be verified.
2849 DEBUG_ONLY(do_verify_chunk(this_chunk));
2850
2851 // Return this chunk to freelist and correct counter.
2852 free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
2853 _free_chunks_count++;
2854
2855 log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
2856 SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
2857 p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
2858 p2i(region_start), p2i(region_end));
2859
2860 p += this_chunk_word_size;
2861
2862 }
2863
2864 // ChunkManager and VirtualSpaceNode should be valid at this point.
2865 DEBUG_ONLY(this->locked_verify());
2866
2867 // This will also walk the chunks in the address range and
2868 // verify that we left no "holes".
2869 DEBUG_ONLY(vsn->verify());
2870 DEBUG_ONLY(chunk->container()->verify_container_count());
2871
2872 log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get (%s): Returning chunk "
2873 PTR_FORMAT ", word size (" SIZE_FORMAT_HEX " (%s)",
2874 (is_class() ? "class space" : "metaspace"), p2i(chunk), chunk->word_size(), chunk_size_name(chunk->get_chunk_type()));
2875
2876 DEBUG_ONLY(chunk->inc_use_count();)
2877 return chunk;
2878 }
2879
2880 }
2881
2882 if (chunk == NULL) {
2883 return NULL;
2884 }
2885
2886 // Remove the chunk as the head of the list.
2887 free_list->remove_chunk(chunk);
2888
2889 log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
2890 p2i(free_list), p2i(chunk), chunk->word_size());
2891 } else {
2892 chunk = humongous_dictionary()->get_chunk(word_size);
2893
2894 if (chunk == NULL) {
2895 return NULL;
2896 }
2897
2898 log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
2899 chunk->word_size(), word_size, chunk->word_size() - word_size);
2900 }
2901
2902 // Chunk has been removed from the chunk manager; update counters.
2903 account_for_removed_chunk(chunk);
2904
2905 // Remove it from the links to this freelist
2906 chunk->set_next(NULL);
2907 chunk->set_prev(NULL);
2908
2909 // Chunk is no longer on any freelist. Setting is_tagged_free to false
2910 // makes container_count_slow() work.
2911 do_update_in_use_info_for_chunk(chunk, true);
2912 chunk->container()->inc_container_count();
2913 DEBUG_ONLY(chunk->container()->verify_container_count());
2914
2915 DEBUG_ONLY(slow_locked_verify());
2916 DEBUG_ONLY(chunk->inc_use_count();)
2917 return chunk;
2918 }
2919
2920 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
2921 assert_lock_strong(SpaceManager::expand_lock());
2922 slow_locked_verify();
2923
2924 // Take from the beginning of the list
2925 Metachunk* chunk = free_chunks_get(word_size);
2926 if (chunk == NULL) {
2927 return NULL;
2928 }
2929
2930 assert((word_size <= chunk->word_size()) ||
2931 (list_index(chunk->word_size()) == HumongousIndex),
2932 "Non-humongous variable sized chunk");
2933 LogTarget(Debug, gc, metaspace, freelist) lt;
2934 if (lt.is_enabled()) {
2935 size_t list_count;
2936 if (list_index(word_size) < HumongousIndex) {
2937 ChunkList* list = find_free_chunks_list(word_size);
2938 list_count = list->count();
2939 } else {
2940 list_count = humongous_dictionary()->total_count();
2941 }
2942 LogStream ls(lt);
2943 ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
2944 p2i(this), p2i(chunk), chunk->word_size(), list_count);
2945 ResourceMark rm;
2946 locked_print_free_chunks(&ls);
2947 }
2948
2949 return chunk;
2950 }
2951
2952 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
2953 assert_lock_strong(SpaceManager::expand_lock());
2954 assert(chunk != NULL, "Expected chunk.");
2955 DEBUG_ONLY(do_verify_chunk(chunk);)
2956 assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
2957 assert(chunk->container() != NULL, "Container should have been set.");
2958 assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
2959 index_bounds_check(index);
2960
2961 // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
2962 // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
2963 // keeps tree node pointers in the chunk payload area which mangle will overwrite.
2964 DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
2965
2966 if (index != HumongousIndex) {
2967 // Return non-humongous chunk to freelist.
2968 ChunkList* list = free_chunks(index);
2969 assert(list->size() == chunk->word_size(), "Wrong chunk type.");
2970 list->return_chunk_at_head(chunk);
2971 log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.",
2972 chunk_size_name(index), p2i(chunk));
2973 } else {
2974 // Return humongous chunk to dictionary.
2975 assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type.");
2976 assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0,
2977 "Humongous chunk has wrong alignment.");
2978 _humongous_dictionary.return_chunk(chunk);
2979 log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
2980 chunk_size_name(index), p2i(chunk), chunk->word_size());
2981 }
2982 chunk->container()->dec_container_count();
2983 do_update_in_use_info_for_chunk(chunk, false);
2984
2985 // Chunk has been added; update counters.
2986 account_for_added_chunk(chunk);
2987
2988 // Attempt to coalesce the returned chunk with its neighboring chunks:
2989 // if this chunk is small or specialized, attempt to coalesce it into a medium chunk.
2990 if (index == SmallIndex || index == SpecializedIndex) {
2991 if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
2992 // That did not work. But if this chunk is specialized, we may still be able to form a small chunk.
2993 if (index == SpecializedIndex) {
2994 if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
2995 // give up.
2996 }
2997 }
2998 }
2999 }
3000
3001 }
3002
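// Sizing note (ours): in non-class space a medium chunk spans 16 small
// chunks (16 * 512 == 8192 words) or 64 specialized chunks. Returning the
// last free small chunk of a fully-free, medium-aligned 8K-word region
// therefore collapses 16 freelist entries into one medium chunk, which a
// later medium allocation can reuse without committing new memory.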
3003 void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
3004 index_bounds_check(index);
3005 if (chunks == NULL) {
3006 return;
3007 }
3008 LogTarget(Trace, gc, metaspace, freelist) log;
3009 if (log.is_enabled()) { // tracing
3010 log.print("returning list of %s chunks...", chunk_size_name(index));
3011 }
3012 unsigned num_chunks_returned = 0;
3013 size_t size_chunks_returned = 0;
3014 Metachunk* cur = chunks;
3015 while (cur != NULL) {
3016 // Capture the next link before it is changed
3017 // by the call to return_chunk_at_head();
3018 Metachunk* next = cur->next();
3019 if (log.is_enabled()) { // tracing
3020 num_chunks_returned ++;
3449
3450 void SpaceManager::initialize() {
3451 Metadebug::init_allocation_fail_alot_count();
3452 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3453 _chunks_in_use[i] = NULL;
3454 }
3455 _current_chunk = NULL;
3456 log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
3457 }
3458
3459 SpaceManager::~SpaceManager() {
3460 // This call takes this->_lock, which can't be acquired while holding expand_lock().
3461 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
3462 "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
3463 " allocated_chunks_words() " SIZE_FORMAT,
3464 sum_capacity_in_chunks_in_use(), allocated_chunks_words());
3465
3466 MutexLockerEx fcl(SpaceManager::expand_lock(),
3467 Mutex::_no_safepoint_check_flag);
3468
3469 assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
3470 "sum_count_in_chunks_in_use() " SIZE_FORMAT
3471 " allocated_chunks_count() " SIZE_FORMAT,
3472 sum_count_in_chunks_in_use(), allocated_chunks_count());
3473
3474 chunk_manager()->slow_locked_verify();
3475
3476 dec_total_from_size_metrics();
3477
3478 Log(gc, metaspace, freelist) log;
3479 if (log.is_trace()) {
3480 log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
3481 ResourceMark rm;
3482 LogStream ls(log.trace());
3483 locked_print_chunks_in_use_on(&ls);
3484 if (block_freelists() != NULL) {
3485 block_freelists()->print_on(&ls);
3486 }
3487 }
3488
3489 // Add all the chunks in use by this space manager
3490 // to the global list of free chunks.
3491
3492 // Follow each list of chunks-in-use and add them to the
3493 // free lists. Each list is NULL terminated.
3577 }
3578
3579 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
3580 // Get a chunk from the chunk freelist
3581 Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
3582
3583 if (next == NULL) {
3584 next = vs_list()->get_new_chunk(chunk_word_size,
3585 medium_chunk_bunch());
3586 }
3587
3588 Log(gc, metaspace, alloc) log;
3589 if (log.is_debug() && next != NULL &&
3590 SpaceManager::is_humongous(next->word_size())) {
3591 log.debug(" new humongous chunk word size " SIZE_FORMAT, next->word_size());
3592 }
3593
3594 return next;
3595 }
3596
3597 MetaWord* SpaceManager::allocate(size_t word_size) {
3598 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
3599 size_t raw_word_size = get_allocation_word_size(word_size);
3600 BlockFreelist* fl = block_freelists();
3601 MetaWord* p = NULL;
3602 // Allocation from the dictionary is expensive in the sense that
3603 // the dictionary has to be searched for a size. Don't allocate
3604 // from the dictionary until it starts to get fat. Is this
3605 // a reasonable policy? Maybe a skinny dictionary is fast enough
3606 // for allocations. Do some profiling. JJJ
3607 if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
3608 p = fl->get_block(raw_word_size);
3609 }
3610 if (p == NULL) {
3611 p = allocate_work(raw_word_size);
3612 }
3613
3614 return p;
3615 }
3616
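// Threshold note (ours): with allocation_from_dictionary_limit = 4 * K
// words, the deallocated-block dictionary is only consulted once roughly
//
//   4 * K * BytesPerWord   // 32 KB on 64-bit platforms
//
// of freed metadata has accumulated for this space manager; until then,
// every request bump-allocates from the current chunk via allocate_work().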
3634 result = grow_and_allocate(word_size);
3635 }
3636
3637 if (result != NULL) {
3638 inc_used_metrics(word_size);
3639 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
3640 "Head of the list is being allocated");
3641 }
3642
3643 return result;
3644 }
3645
3646 void SpaceManager::verify() {
3647 // If there are blocks in the dictionary, then
3648 // verification of chunks does not work since
3649 // being in the dictionary alters a chunk.
3650 if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
3651 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
3652 Metachunk* curr = chunks_in_use(i);
3653 while (curr != NULL) {
3654 DEBUG_ONLY(do_verify_chunk(curr);)
3655 assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
3656 curr = curr->next();
3657 }
3658 }
3659 }
3660 }
3661
3662 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
3663 assert(is_humongous(chunk->word_size()) ||
3664 chunk->word_size() == medium_chunk_size() ||
3665 chunk->word_size() == small_chunk_size() ||
3666 chunk->word_size() == specialized_chunk_size(),
3667 "Chunk size is wrong");
3668 return;
3669 }
3670
3671 #ifdef ASSERT
3672 void SpaceManager::verify_allocated_blocks_words() {
3673 // Verification is only guaranteed at a safepoint.
3674 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
3675 "Verification can fail if the applications is running");
4476 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
4477 if (_class_space_list != NULL) {
4478 address base = (address)_class_space_list->current_virtual_space()->bottom();
4479 st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
4480 compressed_class_space_size(), p2i(base));
4481 if (requested_addr != 0) {
4482 st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
4483 }
4484 st->cr();
4485 }
4486 }
4487
4488 // For UseCompressedClassPointers the class space is reserved above the top of
4489 // the Java heap. The argument passed in is at the base of the compressed space.
4490 void Metaspace::initialize_class_space(ReservedSpace rs) {
4491 // The reserved space size may be bigger because of alignment, especially with UseLargePages.
4492 assert(rs.size() >= CompressedClassSpaceSize,
4493 SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
4494 assert(using_class_space(), "Must be using class space");
4495 _class_space_list = new VirtualSpaceList(rs);
4496 _chunk_manager_class = new ChunkManager(true/*is_class*/);
4497
4498 if (!_class_space_list->initialization_succeeded()) {
4499 vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
4500 }
4501 }
4502
4503 #endif
4504
4505 void Metaspace::ergo_initialize() {
4506 if (DumpSharedSpaces) {
4507 // Using large pages when dumping the shared archive is currently not implemented.
4508 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
4509 }
4510
4511 size_t page_size = os::vm_page_size();
4512 if (UseLargePages && UseLargePagesInMetaspace) {
4513 page_size = os::large_page_size();
4514 }
4515
4516 _commit_alignment = page_size;
4583 }
4584 #endif // _LP64
4585 }
4586
4587 // Initialize these before initializing the VirtualSpaceList
4588 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
4589 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
4590 // Make the first class chunk bigger than a medium chunk so it's not put
4591 // on the medium chunk list. The next chunk will be small and progress
4592 // from there. This size was calculated by running -version.
4593 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
4594 (CompressedClassSpaceSize/BytesPerWord)*2);
4595 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
4596 // Arbitrarily set the initial virtual space to a multiple
4597 // of the boot class loader size.
4598 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
4599 word_size = align_up(word_size, Metaspace::reserve_alignment_words());
4600
4601 // Initialize the list of virtual spaces.
4602 _space_list = new VirtualSpaceList(word_size);
4603 _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
4604
4605 if (!_space_list->initialization_succeeded()) {
4606 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
4607 }
4608
4609 _tracer = new MetaspaceTracer();
4610 }
4611
4612 void Metaspace::post_initialize() {
4613 MetaspaceGC::post_initialize();
4614 }
4615
4616 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
4617 Metachunk* chunk = get_initialization_chunk(type, mdtype);
4618 if (chunk != NULL) {
4619 // Add to this manager's list of chunks in use and current_chunk().
4620 get_space_manager(mdtype)->add_chunk(chunk, true);
4621 }
4622 }
4623
4783 "ClassLoaderData::the_null_class_loader_data() should have been used.");
4784
4785 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
4786
4787 // Try to allocate metadata.
4788 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
4789
4790 if (result == NULL) {
4791 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
4792
4793 // Allocation failed.
4794 if (is_init_completed()) {
4795 // Only start a GC if the bootstrapping has completed.
4796
4797 // Try to clean out some memory and retry.
4798 result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
4799 }
4800 }
4801
4802 if (result == NULL) {
4803 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
4804 }
4805
4806 // Zero initialize.
4807 Copy::fill_to_words((HeapWord*)result, word_size, 0);
4808
4809 return result;
4810 }
4811
4812 size_t Metaspace::class_chunk_size(size_t word_size) {
4813 assert(using_class_space(), "Has to use class space");
4814 return class_vsm()->calc_chunk_size(word_size);
4815 }
4816
4817 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
4818 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
4819
4820 // If result is still null, we are out of memory.
4821 Log(gc, metaspace, freelist) log;
4822 if (log.is_info()) {
4823 log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
4824 is_class_space_allocation(mdtype) ? "class" : "data", word_size);
4914
4915 return get_space_list(NonClassType)->contains(ptr);
4916 }
4917
4918 void Metaspace::verify() {
4919 vsm()->verify();
4920 if (using_class_space()) {
4921 class_vsm()->verify();
4922 }
4923 }
4924
4925 void Metaspace::dump(outputStream* const out) const {
4926 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
4927 vsm()->dump(out);
4928 if (using_class_space()) {
4929 out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
4930 class_vsm()->dump(out);
4931 }
4932 }
4933
4934 #ifdef ASSERT
4935 static void do_verify_chunk(Metachunk* chunk) {
4936 guarantee(chunk != NULL, "Sanity");
4937 // Verify chunk itself; then verify that it is consistent with the
4938 // occupancy map of its containing node.
4939 chunk->verify();
4940 VirtualSpaceNode* const vsn = chunk->container();
4941 OccupancyMap* const ocmap = vsn->occupancy_map();
4942 ocmap->verify_for_chunk(chunk);
4943 }
4944 #endif
4945
4946 static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
4947 chunk->set_is_tagged_free(!inuse);
4948 OccupancyMap* const ocmap = chunk->container()->occupancy_map();
4949 ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
4950 }
4951
4952 /////////////// Unit tests ///////////////
4953
4954 #ifndef PRODUCT
4955
4956 class TestMetaspaceAuxTest : AllStatic {
4957 public:
4958 static void test_reserved() {
4959 size_t reserved = MetaspaceAux::reserved_bytes();
4960
4961 assert(reserved > 0, "assert");
4962
4963 size_t committed = MetaspaceAux::committed_bytes();
4964 assert(committed <= reserved, "assert");
4965
4966 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
4967 assert(reserved_metadata > 0, "assert");
4968 assert(reserved_metadata <= reserved, "assert");
4969
4970 if (UseCompressedClassPointers) {
4971 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
5022 words_left = words_left % MediumChunk;
5023
5024 num_small_chunks = words_left / SmallChunk;
5025 words_left = words_left % SmallChunk;
5026 // how many specialized chunks can we get?
5027 num_specialized_chunks = words_left / SpecializedChunk;
5028 assert(words_left % SpecializedChunk == 0, "should be nothing left");
5029 }
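// Worked example (ours; m/s/sp are shorthands for the out parameters):
//
//   chunk_up(MediumChunk + SmallChunk + SpecializedChunk, m, s, sp)
//
// yields m == 1, s == 1 and sp == 1: the greedy division above peels off
// the largest chunk sizes first, and the SpecializedChunk remainder
// divides evenly by construction.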
5030
5031 public:
5032 static void test() {
5033 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
5034 const size_t vsn_test_size_words = MediumChunk * 4;
5035 const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
5036
5037 // The chunk sizes must be multiples of each other, or this will fail
5038 STATIC_ASSERT(MediumChunk % SmallChunk == 0);
5039 STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
5040
5041 { // No committed memory in VSN
5042 ChunkManager cm(false);
5043 VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5044 vsn.initialize();
5045 vsn.retire(&cm);
5046 assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
5047 }
5048
5049 { // All of VSN is committed, half is used by chunks
5050 ChunkManager cm(false);
5051 VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5052 vsn.initialize();
5053 vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
5054 vsn.get_chunk_vs(MediumChunk);
5055 vsn.get_chunk_vs(MediumChunk);
5056 vsn.retire(&cm);
5057 assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
5058 assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
5059 }
5060
5061 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
5062 // This doesn't work for systems with vm_page_size >= 16K.
5063 if (page_chunks < MediumChunk) {
5064 // 4 pages of VSN is committed, some is used by chunks
5065 ChunkManager cm(false);
5066 VirtualSpaceNode vsn(false, vsn_test_size_bytes);
5067
5068 vsn.initialize();
5069 vsn.expand_by(page_chunks, page_chunks);
5070 vsn.get_chunk_vs(SmallChunk);
5071 vsn.get_chunk_vs(SpecializedChunk);
5072 vsn.retire(&cm);
5073
5074 // committed - used = words left to retire
5075 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
5076
5077 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
5078 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
5079
5080 assert(num_medium_chunks == 0, "should not get any medium chunks");
5081 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
5082 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
5083 }

    { // Half of VSN is committed, a humongous chunk is used
      ChunkManager cm(false);
      VirtualSpaceNode vsn(false, vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
      // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk.
      vsn.get_chunk_vs(MediumChunk + SpecializedChunk);
      vsn.retire(&cm);

      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "free chunk count should match the computed small + specialized chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }
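
    // Illustrative numbers for the humongous case: words_left =
    // 2 * 8192 - (8192 + 128) = 8064 words, which chunk_up() splits into
    // 15 small (7680) + 3 specialized (384) = 18 free chunks.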

  }

#define assert_is_available_positive(word_size) \
  assert(vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes were not available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));

#define assert_is_available_negative(word_size) \
  assert(!vsn.is_available(word_size), \
         #word_size ": " PTR_FORMAT " bytes should not be available in " \
         "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
         (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end()));
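
// Note: the macros above stringify their argument via #word_size, so e.g.
// assert_is_available_positive(commit_word_size) reports a failure as
// "commit_word_size: 0x... bytes were not available in VirtualSpaceNode ...".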

  static void test_is_available_positive() {
    // Reserve some memory.
    VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available accepts the committed size.
    assert_is_available_positive(commit_word_size);

    // Check that is_available accepts half the committed size.
    size_t expand_word_size = commit_word_size / 2;
    assert_is_available_positive(expand_word_size);
  }

  static void test_is_available_negative() {
    // Reserve some memory.
    VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available rejects a size larger than the committed size.
    size_t two_times_commit_word_size = commit_word_size * 2;
    assert_is_available_negative(two_times_commit_word_size);
  }

  static void test_is_available_overflow() {
    // Reserve some memory.
    VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Calculate a size that will overflow the virtual space size.
    void* virtual_space_max = (void*)(uintptr_t)-1;
    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
    size_t overflow_size = bottom_to_max + BytesPerWord;
    size_t overflow_word_size = overflow_size / BytesPerWord;

    // Check that is_available can handle the overflow.
    assert_is_available_negative(overflow_word_size);
  }
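
  // Illustrative arithmetic for the overflow probe above, assuming 64-bit
  // pointers: virtual_space_max is 0xffffffffffffffff, bottom_to_max is the
  // byte distance from vsn.bottom() to that address, and overflow_size is one
  // word more, so bottom() + overflow_size wraps past the end of the address
  // space. is_available() must reject the resulting word count instead of
  // overflowing in its own arithmetic.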

  static void test_is_available() {
    TestVirtualSpaceNodeTest::test_is_available_positive();
    TestVirtualSpaceNodeTest::test_is_available_negative();
    TestVirtualSpaceNodeTest::test_is_available_overflow();
  }
};

// The following test is placed here instead of a gtest / unittest file
// because the ChunkManager class is only available in this file.
void ChunkManager_test_list_index() {
  ChunkManager manager(true);

  // Test a previous bug where a query for a humongous class metachunk
  // incorrectly matched the non-class medium metachunk size.
  {
    assert(MediumChunk > ClassMediumChunk, "Precondition for test");

    ChunkIndex index = manager.list_index(MediumChunk);

    assert(index == HumongousIndex,
           "Requested size is larger than ClassMediumChunk,"
           " so should return HumongousIndex. Got index: %d", (int)index);
  }

  // Check the specified sizes as well.
  {
    ChunkIndex index = manager.list_index(ClassSpecializedChunk);
    assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
  }
  {
    ChunkIndex index = manager.list_index(ClassSmallChunk);
    assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
  }
  {
    ChunkIndex index = manager.list_index(ClassMediumChunk);
    assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
  }
  {
    ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
    assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
  }
}

#endif // !PRODUCT

#ifdef ASSERT

// The following test is placed here instead of a gtest / unittest file
// because the SpaceManager class is only available in this file.
class SpaceManagerTest : AllStatic {
  friend void SpaceManager_test_adjust_initial_chunk_size();

  static void test_adjust_initial_chunk_size(bool is_class) {
    const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
    const size_t normal   = SpaceManager::small_chunk_size(is_class);
    const size_t medium   = SpaceManager::medium_chunk_size(is_class);

#define test_adjust_initial_chunk_size(value, expected, is_class_value)         \
    do {                                                                        \
      size_t v = value;                                                         \
      size_t e = expected;                                                      \
      size_t r = SpaceManager::adjust_initial_chunk_size(v, (is_class_value));  \
      assert(r == e, "Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, r);      \
    } while (0)

    // Smallest (specialized)
    test_adjust_initial_chunk_size(1,            smallest, is_class);
    test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
    test_adjust_initial_chunk_size(smallest,     smallest, is_class);

    // Small
    test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
    test_adjust_initial_chunk_size(normal - 1,   normal, is_class);
    test_adjust_initial_chunk_size(normal,       normal, is_class);

    // Medium
    test_adjust_initial_chunk_size(normal + 1, medium, is_class);
    test_adjust_initial_chunk_size(medium - 1, medium, is_class);
    test_adjust_initial_chunk_size(medium,     medium, is_class);

    // Humongous
    test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);

#undef test_adjust_initial_chunk_size
  }
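
  // Illustrative mapping for the non-class case, assuming smallest / normal /
  // medium resolve to the SpecializedChunk (128), SmallChunk (512) and
  // MediumChunk (8 * K) constants above: requests of 1..128 words round up
  // to 128, 129..512 to 512, 513..8192 to 8192, and larger (humongous)
  // requests are returned unchanged.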

  static void test_adjust_initial_chunk_size() {
    test_adjust_initial_chunk_size(false);
    test_adjust_initial_chunk_size(true);
  }
};

void SpaceManager_test_adjust_initial_chunk_size() {
  SpaceManagerTest::test_adjust_initial_chunk_size();
}

#endif // ASSERT

struct chunkmanager_statistics_t {
  int num_specialized_chunks;
  int num_small_chunks;
  int num_medium_chunks;
  int num_humongous_chunks;
};

extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
  ChunkManager::ChunkManagerStatistics stat;
  chunk_manager->get_statistics(&stat);
  out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
  out->num_small_chunks       = (int)stat.num_by_type[SmallIndex];
  out->num_medium_chunks      = (int)stat.num_by_type[MediumIndex];
  out->num_humongous_chunks   = (int)stat.num_humongous_chunks;
}
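
// A hypothetical consumer of the statistics hook above (e.g. from a whitebox
// gtest; the calling code is illustrative, not part of this file):
//
//   chunkmanager_statistics_t stat;
//   test_metaspace_retrieve_chunkmanager_statistics(Metaspace::NonClassType, &stat);
//   assert(stat.num_specialized_chunks >= 0, "counts are snapshots, never negative");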

struct chunk_geometry_t {
  size_t specialized_chunk_word_size;
  size_t small_chunk_word_size;
  size_t medium_chunk_word_size;
};

extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
  if (mdType == Metaspace::NonClassType) {
    out->specialized_chunk_word_size = SpecializedChunk;
    out->small_chunk_word_size       = SmallChunk;
    out->medium_chunk_word_size      = MediumChunk;
  } else {
    out->specialized_chunk_word_size = ClassSpecializedChunk;
    out->small_chunk_word_size       = ClassSmallChunk;
    out->medium_chunk_word_size      = ClassMediumChunk;
  }
}
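
// A hypothetical caller of the geometry hook (illustrative): with the chunk
// size constants defined at the top of this file, the non-class geometry is
// 128 / 512 / 8 * K words and the class-space geometry is 128 / 256 / 4 * K
// words.
//
//   chunk_geometry_t geom;
//   test_metaspace_retrieve_chunk_geometry(Metaspace::NonClassType, &geom);
//   assert(geom.medium_chunk_word_size == MediumChunk, "sanity");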