src/hotspot/share/memory/metaspace/spaceManager.cpp

  58   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
  59     if (requested <= chunk_sizes[i]) {
  60       return chunk_sizes[i];
  61     }
  62   }
  63 
  64   // ... or return the size as a humongous chunk.
  65   return requested;
  66 }
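
A note on the loop above: it implements simple bucketing. The request is rounded up to the smallest fixed chunk size that can hold it, and anything larger than the biggest fixed size falls through unchanged and becomes a humongous chunk. A minimal standalone sketch of that behavior follows; the word sizes 128/512/8192 stand in for the SpecializedChunk/SmallChunk/MediumChunk constants and are assumptions here, not values taken from this source:

    #include <cstddef>
    #include <cstdio>

    // Assumed fixed chunk sizes in words, smallest to largest.
    static const size_t chunk_sizes[] = { 128, 512, 8192 };

    // Round a request up to the smallest fixed chunk that fits,
    // or hand it back unchanged as a humongous request.
    static size_t adjust_initial_chunk_size(size_t requested) {
      for (size_t i = 0; i < sizeof(chunk_sizes) / sizeof(chunk_sizes[0]); i++) {
        if (requested <= chunk_sizes[i]) {
          return chunk_sizes[i];
        }
      }
      return requested;  // humongous
    }

    int main() {
      printf("%zu\n", adjust_initial_chunk_size(100));   // 128: rounded up
      printf("%zu\n", adjust_initial_chunk_size(512));   // 512: exact fit
      printf("%zu\n", adjust_initial_chunk_size(9000));  // 9000: humongous
      return 0;
    }
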
  67 
  68 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
  69   return adjust_initial_chunk_size(requested, is_class());
  70 }
  71 
  72 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
  73   size_t requested;
  74 
  75   if (is_class()) {
  76     switch (type) {
  77     case Metaspace::BootMetaspaceType:              requested = Metaspace::first_class_chunk_word_size(); break;
  78     case Metaspace::UnsafeAnonymousMetaspaceType:   requested = ClassSpecializedChunk; break;
  79     case Metaspace::ReflectionMetaspaceType:        requested = ClassSpecializedChunk; break;
  80     default:                                        requested = ClassSmallChunk; break;
  81     }
  82   } else {
  83     switch (type) {
  84     case Metaspace::BootMetaspaceType:              requested = Metaspace::first_chunk_word_size(); break;
  85     case Metaspace::UnsafeAnonymousMetaspaceType:   requested = SpecializedChunk; break;
  86     case Metaspace::ReflectionMetaspaceType:        requested = SpecializedChunk; break;
  87     default:                                        requested = SmallChunk; break;
  88     }
  89   }
  90 
  91   // Adjust to one of the fixed chunk sizes (unless humongous)
  92   const size_t adjusted = adjust_initial_chunk_size(requested);
  93 
  94   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
  95          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
  96 
  97   return adjusted;
  98 }
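
For reference, the two switches above reduce to the following mapping; the requested size is then rounded to a fixed chunk size by adjust_initial_chunk_size unless it is humongous:

    MetaspaceType                  non-class space            class space
    -----------------------------  -------------------------  -----------------------------
    BootMetaspaceType              first_chunk_word_size()    first_class_chunk_word_size()
    UnsafeAnonymousMetaspaceType   SpecializedChunk           ClassSpecializedChunk
    ReflectionMetaspaceType        SpecializedChunk           ClassSpecializedChunk
    all other types                SmallChunk                 ClassSmallChunk
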
  99 
 100 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
 101 
 102   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
 103     st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
 104         num_chunks_by_type(i), chunk_size_name(i));
 105   }
 106 
 107   chunk_manager()->locked_print_free_chunks(st);
 108 }
 109 
 110 size_t SpaceManager::calc_chunk_size(size_t word_size) {
 111 
 112   // Decide between a small chunk and a medium chunk.  Up to
 113   // small_chunk_limit small chunks can be allocated.
 114   // After that a medium chunk is preferred.
 115   size_t chunk_word_size;
 116 
 117   // Special case for unsafe anonymous metadata space.
 118   // UnsafeAnonymous metadata space is usually small since it is used for
 119   // class loader data whose life cycle is governed by one class, such as an
 120   // unsafe anonymous class.  The majority is within the 1K - 2K range and
 121   // rarely reaches 4K (64-bit JVM).
 122   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping
 123   // allocations in SpecializedChunk up to anon_and_delegating_metadata_specialize_chunk_limit (4)
 124   // reduces space waste from 60+% to around 30%.
 125   if ((_space_type == Metaspace::UnsafeAnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
 126       _mdtype == Metaspace::NonClassType &&
 127       num_chunks_by_type(SpecializedIndex) < anon_and_delegating_metadata_specialize_chunk_limit &&
 128       word_size + Metachunk::overhead() <= SpecializedChunk) {
 129     return SpecializedChunk;
 130   }
 131 
 132   if (num_chunks_by_type(MediumIndex) == 0 &&
 133       num_chunks_by_type(SmallIndex) < small_chunk_limit) {
 134     chunk_word_size = (size_t) small_chunk_size();
 135     if (word_size + Metachunk::overhead() > small_chunk_size()) {
 136       chunk_word_size = medium_chunk_size();
 137     }
 138   } else {
 139     chunk_word_size = medium_chunk_size();
 140   }
 141 
 142   // Might still need a humongous chunk.  Enforce
 143   // humongous allocation sizes to be aligned up to
 144   // the smallest chunk size.
 145   size_t if_humongous_sized_chunk =




  58   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
  59     if (requested <= chunk_sizes[i]) {
  60       return chunk_sizes[i];
  61     }
  62   }
  63 
  64   // ... or return the size as a humongous chunk.
  65   return requested;
  66 }
  67 
  68 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
  69   return adjust_initial_chunk_size(requested, is_class());
  70 }
  71 
  72 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
  73   size_t requested;
  74 
  75   if (is_class()) {
  76     switch (type) {
  77     case Metaspace::BootMetaspaceType:              requested = Metaspace::first_class_chunk_word_size(); break;
  78     case Metaspace::ShortLivedMetaspaceType:        requested = ClassSpecializedChunk; break;
  79     case Metaspace::ReflectionMetaspaceType:        requested = ClassSpecializedChunk; break;
  80     default:                                        requested = ClassSmallChunk; break;
  81     }
  82   } else {
  83     switch (type) {
  84     case Metaspace::BootMetaspaceType:              requested = Metaspace::first_chunk_word_size(); break;
  85     case Metaspace::ShortLivedMetaspaceType:        requested = SpecializedChunk; break;
  86     case Metaspace::ReflectionMetaspaceType:        requested = SpecializedChunk; break;
  87     default:                                        requested = SmallChunk; break;
  88     }
  89   }
  90 
  91   // Adjust to one of the fixed chunk sizes (unless humongous)
  92   const size_t adjusted = adjust_initial_chunk_size(requested);
  93 
  94   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
  95          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
  96 
  97   return adjusted;
  98 }
  99 
 100 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
 101 
 102   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
 103     st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
 104         num_chunks_by_type(i), chunk_size_name(i));
 105   }
 106 
 107   chunk_manager()->locked_print_free_chunks(st);
 108 }
 109 
 110 size_t SpaceManager::calc_chunk_size(size_t word_size) {
 111 
 112   // Decide between a small chunk and a medium chunk.  Up to
 113   // small_chunk_limit small chunks can be allocated.
 114   // After that a medium chunk is preferred.
 115   size_t chunk_word_size;
 116 
 117   // Special case for short-lived metadata space.
 118   // ShortLived metadata space is usually small since it is used for
 119   // class loader data whose life cycle is governed by one class, such as
 120   // a weak hidden or unsafe anonymous class.  The majority is within the
 121   // 1K - 2K range and rarely reaches 4K (64-bit JVM).
 122   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping
 123   // allocations in SpecializedChunk up to anon_and_delegating_metadata_specialize_chunk_limit (4)
 124   // reduces space waste from 60+% to around 30%.
 125   if ((_space_type == Metaspace::ShortLivedMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
 126       _mdtype == Metaspace::NonClassType &&
 127       num_chunks_by_type(SpecializedIndex) < anon_and_delegating_metadata_specialize_chunk_limit &&
 128       word_size + Metachunk::overhead() <= SpecializedChunk) {
 129     return SpecializedChunk;
 130   }
 131 
 132   if (num_chunks_by_type(MediumIndex) == 0 &&
 133       num_chunks_by_type(SmallIndex) < small_chunk_limit) {
 134     chunk_word_size = (size_t) small_chunk_size();
 135     if (word_size + Metachunk::overhead() > small_chunk_size()) {
 136       chunk_word_size = medium_chunk_size();
 137     }
 138   } else {
 139     chunk_word_size = medium_chunk_size();
 140   }
 141 
 142   // Might still need a humongous chunk.  Enforce
 143   // humongous allocation sizes to be aligned up to
 144   // the smallest chunk size.
 145   size_t if_humongous_sized_chunk =


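A rough check on the waste figures in the calc_chunk_size comment, assuming a 64-bit JVM (8-byte words) and non-class sizes of 128 words (~1 KB) for SpecializedChunk and 512 words (~4 KB) for SmallChunk (both assumptions here): a short-lived loader that needs about 1.5 KB of metadata strands roughly 2.5 KB of a 4 KB SmallChunk, over 60% waste, while two 1 KB SpecializedChunks hold the same data with about 0.5 KB stranded, roughly 25-30%. Below is a minimal sketch of the selection policy itself; every size and limit is a stand-in, and the humongous alignment step at the end of the real function is omitted:

    #include <cassert>
    #include <cstddef>

    // Stand-ins for the HotSpot chunk size constants and limits.
    static const size_t   kSpecializedChunk = 128;   // words, ~1 KB on 64-bit
    static const size_t   kSmallChunk       = 512;   // words, ~4 KB on 64-bit
    static const size_t   kMediumChunk      = 8192;  // words
    static const size_t   kOverhead         = 0;     // stand-in for Metachunk::overhead()
    static const unsigned kSpecializedLimit = 4;     // anon_and_delegating_metadata_specialize_chunk_limit
    static const unsigned kSmallLimit       = 4;     // stand-in for small_chunk_limit

    // How many chunks of each size this space manager already holds.
    struct Counts { unsigned specialized, small, medium; };

    // Sketch of the policy for a non-class space.
    static size_t calc_chunk_size(size_t word_size, const Counts& c,
                                  bool short_lived_or_reflection) {
      // Keep handing out specialized chunks while under the limit,
      // instead of jumping straight to a mostly wasted small chunk.
      if (short_lived_or_reflection &&
          c.specialized < kSpecializedLimit &&
          word_size + kOverhead <= kSpecializedChunk) {
        return kSpecializedChunk;
      }
      // Otherwise prefer small chunks until the small-chunk limit is
      // reached or the request no longer fits, then go medium.
      if (c.medium == 0 && c.small < kSmallLimit &&
          word_size + kOverhead <= kSmallChunk) {
        return kSmallChunk;
      }
      return kMediumChunk;
    }

    int main() {
      Counts c = { 0, 0, 0 };
      // A short-lived loader's first few allocations stay specialized...
      assert(calc_chunk_size(100, c, true) == kSpecializedChunk);
      // ...and move to small chunks once the limit is reached.
      c.specialized = kSpecializedLimit;
      assert(calc_chunk_size(100, c, true) == kSmallChunk);
      return 0;
    }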