
src/hotspot/share/memory/metaspace/spaceManager.cpp

rev 58565 : 8238358: Implementation of JEP 371: Hidden Classes
Reviewed-by: duke
Contributed-by: mandy.chung@oracle.com, lois.foltan@oracle.com, david.holmes@oracle.com, harold.seigel@oracle.com, serguei.spitsyn@oracle.com, alex.buckley@oracle.com, jamsheed.c.m@oracle.com
--- old/src/hotspot/share/memory/metaspace/spaceManager.cpp
   1 /*
   2  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  58   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
  59     if (requested <= chunk_sizes[i]) {
  60       return chunk_sizes[i];
  61     }
  62   }
  63 
  64   // ... or return the size as a humongous chunk.
  65   return requested;
  66 }
  67 
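
The loop just above rounds a requested word size up to the smallest fixed chunk size that can hold it, and falls through to treating the request itself as a humongous chunk size. A minimal standalone sketch of that policy, compilable on its own; the chunk sizes here are illustrative stand-ins, not the JVM's actual Specialized/Small/Medium constants:

#include <cstdio>
#include <cstddef>

// Illustrative fixed chunk sizes in words (assumed values, smallest first).
static const size_t chunk_sizes[] = { 128, 512, 8192 };

static size_t adjust_initial_chunk_size_sketch(size_t requested) {
  // Round up to the smallest fixed chunk size that can hold the request...
  for (size_t i = 0; i < sizeof(chunk_sizes) / sizeof(chunk_sizes[0]); i++) {
    if (requested <= chunk_sizes[i]) {
      return chunk_sizes[i];
    }
  }
  // ... or return the size unchanged as a humongous chunk.
  return requested;
}

int main() {
  printf("%zu\n", adjust_initial_chunk_size_sketch(100));  // 128
  printf("%zu\n", adjust_initial_chunk_size_sketch(513));  // 8192
  printf("%zu\n", adjust_initial_chunk_size_sketch(9000)); // 9000 (humongous)
  return 0;
}

Note the fallthrough case: any request larger than the largest fixed size keeps its exact size and is tracked as a humongous chunk.
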
  68 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
  69   return adjust_initial_chunk_size(requested, is_class());
  70 }
  71 
  72 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
  73   size_t requested;
  74 
  75   if (is_class()) {
  76     switch (type) {
  77     case Metaspace::BootMetaspaceType:              requested = Metaspace::first_class_chunk_word_size(); break;
  78     case Metaspace::UnsafeAnonymousMetaspaceType:   requested = ClassSpecializedChunk; break;
  79     case Metaspace::ReflectionMetaspaceType:        requested = ClassSpecializedChunk; break;
  80     default:                                        requested = ClassSmallChunk; break;
  81     }
  82   } else {
  83     switch (type) {
  84     case Metaspace::BootMetaspaceType:              requested = Metaspace::first_chunk_word_size(); break;
  85     case Metaspace::UnsafeAnonymousMetaspaceType:   requested = SpecializedChunk; break;
  86     case Metaspace::ReflectionMetaspaceType:        requested = SpecializedChunk; break;
  87     default:                                        requested = SmallChunk; break;
  88     }
  89   }
  90 
  91   // Adjust to one of the fixed chunk sizes (unless humongous)
  92   const size_t adjusted = adjust_initial_chunk_size(requested);
  93 
  94   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
  95          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
  96 
  97   return adjusted;
  98 }
  99 
 100 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
 101 
 102   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
 103     st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
 104         num_chunks_by_type(i), chunk_size_name(i));
 105   }
 106 
 107   chunk_manager()->locked_print_free_chunks(st);
 108 }
 109 
 110 size_t SpaceManager::calc_chunk_size(size_t word_size) {
 111 
 112   // Decide between a small chunk and a medium chunk.  Up to
  113   // small_chunk_limit small chunks can be allocated.
  114   // After that, a medium chunk is preferred.
 115   size_t chunk_word_size;
 116 
  117   // Special case for unsafe anonymous metadata space.
  118   // UnsafeAnonymous metadata space is usually small since it is used for
  119   // class loader data whose life cycle is governed by one class, such as an
  120   // unsafe anonymous class.  The majority is within the 1K - 2K range and
  121   // rarely as large as 4K (64-bit JVM).
  122   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping
  123   // allocations in SpecializedChunk up to anon_and_delegating_metadata_specialize_chunk_limit (4)
  124   // reduces space waste from 60+% to around 30%.
 125   if ((_space_type == Metaspace::UnsafeAnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
 126       _mdtype == Metaspace::NonClassType &&
 127       num_chunks_by_type(SpecializedIndex) < anon_and_delegating_metadata_specialize_chunk_limit &&
 128       word_size + Metachunk::overhead() <= SpecializedChunk) {
 129     return SpecializedChunk;
 130   }
 131 
 132   if (num_chunks_by_type(MediumIndex) == 0 &&
 133       num_chunks_by_type(SmallIndex) < small_chunk_limit) {
 134     chunk_word_size = (size_t) small_chunk_size();
 135     if (word_size + Metachunk::overhead() > small_chunk_size()) {
 136       chunk_word_size = medium_chunk_size();
 137     }
 138   } else {
 139     chunk_word_size = medium_chunk_size();
 140   }
 141 
 142   // Might still need a humongous chunk.  Enforce
  143   // humongous allocation sizes to be aligned up to
 144   // the smallest chunk size.
 145   size_t if_humongous_sized_chunk =
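
The hunk is cut off mid-statement here, before the humongous alignment completes. The small-versus-medium decision just above can still be sketched in isolation: small chunks are handed out until small_chunk_limit of them exist (and no medium chunk does), after which medium chunks are preferred. All constants in the sketch are assumed stand-ins, not values taken from this patch:

#include <cstdio>

// Assumed constants for illustration; the real values live in the JVM.
static const unsigned kSmallChunkLimit = 4;    // stand-in for small_chunk_limit
static const unsigned kSmallWords      = 512;  // stand-in for small_chunk_size()
static const unsigned kMediumWords     = 8192; // stand-in for medium_chunk_size()
static const unsigned kOverheadWords   = 0;    // stand-in for Metachunk::overhead()

// Same decision as above: prefer small chunks until the limit is hit or a
// medium chunk already exists, then allocate medium chunks.
static unsigned next_chunk_words(unsigned word_size,
                                 unsigned num_small, unsigned num_medium) {
  if (num_medium == 0 && num_small < kSmallChunkLimit &&
      word_size + kOverheadWords <= kSmallWords) {
    return kSmallWords;
  }
  return kMediumWords;
}

int main() {
  printf("%u\n", next_chunk_words(100, 0, 0)); // 512: first small chunk
  printf("%u\n", next_chunk_words(100, 4, 0)); // 8192: small-chunk limit reached
  printf("%u\n", next_chunk_words(600, 0, 0)); // 8192: does not fit a small chunk
  return 0;
}
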


+++ new/src/hotspot/share/memory/metaspace/spaceManager.cpp
   1 /*
   2  * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  58   for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
  59     if (requested <= chunk_sizes[i]) {
  60       return chunk_sizes[i];
  61     }
  62   }
  63 
  64   // ... or return the size as a humongous chunk.
  65   return requested;
  66 }
  67 
  68 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
  69   return adjust_initial_chunk_size(requested, is_class());
  70 }
  71 
  72 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
  73   size_t requested;
  74 
  75   if (is_class()) {
  76     switch (type) {
  77     case Metaspace::BootMetaspaceType:              requested = Metaspace::first_class_chunk_word_size(); break;
  78     case Metaspace::ClassMirrorHolderMetaspaceType: requested = ClassSpecializedChunk; break;
  79     case Metaspace::ReflectionMetaspaceType:        requested = ClassSpecializedChunk; break;
  80     default:                                        requested = ClassSmallChunk; break;
  81     }
  82   } else {
  83     switch (type) {
  84     case Metaspace::BootMetaspaceType:              requested = Metaspace::first_chunk_word_size(); break;
  85     case Metaspace::ClassMirrorHolderMetaspaceType: requested = SpecializedChunk; break;
  86     case Metaspace::ReflectionMetaspaceType:        requested = SpecializedChunk; break;
  87     default:                                        requested = SmallChunk; break;
  88     }
  89   }
  90 
  91   // Adjust to one of the fixed chunk sizes (unless humongous)
  92   const size_t adjusted = adjust_initial_chunk_size(requested);
  93 
  94   assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
  95          SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
  96 
  97   return adjusted;
  98 }
  99 
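
This hunk carries the substantive change of the patch: the UnsafeAnonymousMetaspaceType cases from the old file are now ClassMirrorHolderMetaspaceType, since with JEP 371 both hidden classes and the remaining unsafe anonymous classes hang off a class-mirror-holder class loader data. The selection policy itself is unchanged. A self-contained sketch of the resulting mapping; the enum, function name, and word sizes below are assumptions for illustration, not code from this patch:

#include <cstdio>

enum MetaspaceTypeSketch { Boot, ClassMirrorHolder, Reflection, Standard };

// Initial chunk size in words. Assumed values: SpecializedChunk and
// ClassSpecializedChunk = 128 words, SmallChunk = 512, ClassSmallChunk = 256;
// the boot sizes are computed elsewhere and stubbed here.
static unsigned initial_chunk_words(MetaspaceTypeSketch type, bool is_class) {
  switch (type) {
    case Boot:              return is_class ? 4096 : 8192; // stub boot sizes
    case ClassMirrorHolder:                                // hidden classes
    case Reflection:        return 128;                    // specialized chunk
    default:                return is_class ? 256 : 512;   // small chunk
  }
}

int main() {
  printf("%u\n", initial_chunk_words(ClassMirrorHolder, false)); // 128
  printf("%u\n", initial_chunk_words(Standard, true));           // 256
  return 0;
}
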
 100 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
 101 
 102   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
 103     st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
 104         num_chunks_by_type(i), chunk_size_name(i));
 105   }
 106 
 107   chunk_manager()->locked_print_free_chunks(st);
 108 }
 109 
 110 size_t SpaceManager::calc_chunk_size(size_t word_size) {
 111 
 112   // Decide between a small chunk and a medium chunk.  Up to
  113   // small_chunk_limit small chunks can be allocated.
  114   // After that, a medium chunk is preferred.
 115   size_t chunk_word_size;
 116 
  117   // Special case for hidden metadata space.
  118   // ClassMirrorHolder metadata space is usually small since it is used for
  119   // class loader data whose life cycle is governed by one class, such as a
  120   // weak hidden or unsafe anonymous class.  The majority is within the 1K - 2K range and
  121   // rarely as large as 4K (64-bit JVM).
  122   // Instead of jumping to SmallChunk after the initial chunk is exhausted, keeping
  123   // allocations in SpecializedChunk up to anon_and_delegating_metadata_specialize_chunk_limit (4)
  124   // reduces space waste from 60+% to around 30%.
 125   if ((_space_type == Metaspace::ClassMirrorHolderMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
 126       _mdtype == Metaspace::NonClassType &&
 127       num_chunks_by_type(SpecializedIndex) < anon_and_delegating_metadata_specialize_chunk_limit &&
 128       word_size + Metachunk::overhead() <= SpecializedChunk) {
 129     return SpecializedChunk;
 130   }
 131 
 132   if (num_chunks_by_type(MediumIndex) == 0 &&
 133       num_chunks_by_type(SmallIndex) < small_chunk_limit) {
 134     chunk_word_size = (size_t) small_chunk_size();
 135     if (word_size + Metachunk::overhead() > small_chunk_size()) {
 136       chunk_word_size = medium_chunk_size();
 137     }
 138   } else {
 139     chunk_word_size = medium_chunk_size();
 140   }
 141 
 142   // Might still need a humongous chunk.  Enforce
  143   // humongous allocation sizes to be aligned up to
 144   // the smallest chunk size.
 145   size_t if_humongous_sized_chunk =
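
As a sanity check on the "60+% to around 30%" figures in the calc_chunk_size comment above: assuming a 64-bit word of 8 bytes, a specialized chunk of 128 words (1 KB), a small chunk of 512 words (4 KB), and a typical payload of about 1.5 KB (the comment's "majority within the 1K - 2K range"), the arithmetic works out as below.

#include <cstdio>

int main() {
  // Assumed sizes: 64-bit word = 8 bytes, SpecializedChunk = 128 words
  // (1 KB), SmallChunk = 512 words (4 KB), typical payload = 1.5 KB.
  const double payload     = 1536.0;
  const double small_chunk = 4096.0;
  const double specialized = 1024.0;

  // One small chunk holding 1.5 KB wastes 62.5% of the chunk:
  printf("small-chunk waste: %.1f%%\n",
         100.0 * (small_chunk - payload) / small_chunk);

  // Two specialized chunks (2 KB total) for the same payload waste 25%:
  printf("specialized waste: %.1f%%\n",
         100.0 * (2.0 * specialized - payload) / (2.0 * specialized));
  return 0;
}

Those two numbers (62.5% and 25%) are consistent with the comment's claim that staying on specialized chunks up to the limit cuts waste from over 60% to roughly 30%.
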

