100 // Print, per chunk-size class, how many chunks this SpaceManager holds,
101 // then delegate to the chunk manager for the free-chunk summary.
102 // Caller must already hold the metaspace lock ("locked_" prefix convention).
103 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
104
105 ChunkIndex idx = ZeroIndex;
106 while (idx < NumberOfInUseLists) {
107 st->print("SpaceManager: " UINTX_FORMAT " %s chunks.", num_chunks_by_type(idx), chunk_size_name(idx));
108 idx = next_chunk_index(idx);
109 }
110
111 // Free chunks are tracked globally by the ChunkManager, not per SpaceManager.
112 chunk_manager()->locked_print_free_chunks(st);
113 }
109
110 size_t SpaceManager::calc_chunk_size(size_t word_size) {
111
112 // Decide between a small chunk and a medium chunk. Up to
113 // _small_chunk_limit small chunks can be allocated.
114 // After that a medium chunk is preferred.
115 size_t chunk_word_size;
116
117 // Special case for hidden metadata space.
118 // ClassMirrorHolder metadata space is usually small since it is used for
119 // class loader data's whose life cycle is governed by one class such as a
120 // weak hidden or unsafe anonymous class. The majority within 1K - 2K range and
121 // rarely about 4K (64-bits JVM).
122 // Instead of jumping to SmallChunk after initial chunk exhausted, keeping allocation
123 // from SpecializeChunk up to _anon_or_delegating_metadata_specialize_chunk_limit (4)
124 // reduces space waste from 60+% to around 30%.
125 if ((_space_type == Metaspace::ClassMirrorHolderMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
126 _mdtype == Metaspace::NonClassType &&
127 num_chunks_by_type(SpecializedIndex) < anon_and_delegating_metadata_specialize_chunk_limit &&
128 word_size + Metachunk::overhead() <= SpecializedChunk) {
129 return SpecializedChunk;
130 }
131
132 if (num_chunks_by_type(MediumIndex) == 0 &&
133 num_chunks_by_type(SmallIndex) < small_chunk_limit) {
134 chunk_word_size = (size_t) small_chunk_size();
135 if (word_size + Metachunk::overhead() > small_chunk_size()) {
136 chunk_word_size = medium_chunk_size();
137 }
138 } else {
139 chunk_word_size = medium_chunk_size();
140 }
|
100 // Print a per-chunk-size-class summary of the chunks currently in use by
101 // this SpaceManager, followed by the ChunkManager's free-chunk statistics.
102 // The "locked_" prefix indicates the caller must already hold the relevant
103 // metaspace lock -- TODO confirm which lock is expected at the call sites.
104 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
105
106 // Walk every in-use chunk list (specialized/small/medium/...), printing
107 // the count and human-readable size name for each index.
108 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
109 st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
110 num_chunks_by_type(i), chunk_size_name(i));
111 }
112
113 // Free chunks are owned by the global ChunkManager, so delegate to it.
114 chunk_manager()->locked_print_free_chunks(st);
115 }
109
110 size_t SpaceManager::calc_chunk_size(size_t word_size) {
111
112 // Decide between a small chunk and a medium chunk. Up to
113 // _small_chunk_limit small chunks can be allocated.
114 // After that a medium chunk is preferred.
115 size_t chunk_word_size;
116
117 // Special case for hidden metadata space.
118 // ClassMirrorHolder metadata space is usually small since it is used for
119 // class loader data's whose life cycle is governed by one class such as a
120 // non-strong hidden class or unsafe anonymous class. The majority within 1K - 2K range and
121 // rarely about 4K (64-bits JVM).
122 // Instead of jumping to SmallChunk after initial chunk exhausted, keeping allocation
123 // from SpecializeChunk up to _anon_or_delegating_metadata_specialize_chunk_limit (4)
124 // reduces space waste from 60+% to around 30%.
125 if ((_space_type == Metaspace::ClassMirrorHolderMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
126 _mdtype == Metaspace::NonClassType &&
127 num_chunks_by_type(SpecializedIndex) < anon_and_delegating_metadata_specialize_chunk_limit &&
128 word_size + Metachunk::overhead() <= SpecializedChunk) {
129 return SpecializedChunk;
130 }
131
132 if (num_chunks_by_type(MediumIndex) == 0 &&
133 num_chunks_by_type(SmallIndex) < small_chunk_limit) {
134 chunk_word_size = (size_t) small_chunk_size();
135 if (word_size + Metachunk::overhead() > small_chunk_size()) {
136 chunk_word_size = medium_chunk_size();
137 }
138 } else {
139 chunk_word_size = medium_chunk_size();
140 }
|