
src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp

rev 57511 : [mq]: metaspace-improvement


   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_MEMORY_METASPACE_VIRTUALSPACENODE_HPP
  26 #define SHARE_MEMORY_METASPACE_VIRTUALSPACENODE_HPP
  27 
  28 #include "memory/virtualspace.hpp"
  29 #include "memory/memRegion.hpp"
  30 #include "utilities/debug.hpp"

  31 #include "utilities/globalDefinitions.hpp"
  32 

  33 class outputStream;
  34 
  35 namespace metaspace {
  36 
  37 class Metachunk;
  38 class ChunkManager;
  39 class OccupancyMap;
  40 
  41 // A VirtualSpaceList node.
  42 class VirtualSpaceNode : public CHeapObj<mtClass> {
  43   friend class VirtualSpaceList;
  44 
  45   // Link to next VirtualSpaceNode
  46   VirtualSpaceNode* _next;
  47 
  48   // Whether this node is contained in class or metaspace.
  49   const bool _is_class;
  50 
  51   // The reserved space backing this node and the VirtualSpace tracking its commit state.
  52   ReservedSpace _rs;
  53   VirtualSpace _virtual_space;
  54   MetaWord* _top;
  55   // count of chunks contained in this VirtualSpace
  56   uintx _container_count;
  57 
  58   OccupancyMap* _occupancy_map;
  59 
  60   // Convenience functions to access the _virtual_space
  61   char* low()  const { return virtual_space()->low(); }
  62   char* high() const { return virtual_space()->high(); }
  63   char* low_boundary()  const { return virtual_space()->low_boundary(); }
  64   char* high_boundary() const { return virtual_space()->high_boundary(); }
  65 
  66   // The first Metachunk will be allocated at the bottom of the
  67   // VirtualSpace
  68   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
  69 
  70   // Committed but unused space in the virtual space
  71   size_t free_words_in_vs() const;
  72 
  73   // True if this node belongs to class metaspace.
  74   bool is_class() const { return _is_class; }
  75 
  76   // Helper function for take_from_committed: allocate padding chunks
  77   // until top is at the given address.
  78   void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
  79 
  80  public:
  81 
  82   VirtualSpaceNode(bool is_class, size_t byte_size);
  83   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
  84     _next(NULL), _is_class(is_class), _rs(rs), _top(NULL), _container_count(0), _occupancy_map(NULL) {}
  85   ~VirtualSpaceNode();
  86 
  87   // Convenience functions for logical bottom and end
  88   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  89   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
  90 
  91   const OccupancyMap* occupancy_map() const { return _occupancy_map; }
  92   OccupancyMap* occupancy_map() { return _occupancy_map; }
  93 
  94   bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
  95 
  96   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  97   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
  98 
  99   bool is_pre_committed() const { return _virtual_space.special(); }
 100 
 101   // _top is the address of the next available space in _virtual_space.
 102   // Accessors
 103   VirtualSpaceNode* next() { return _next; }
 104   void set_next(VirtualSpaceNode* v) { _next = v; }
 105 
 106   void set_top(MetaWord* v) { _top = v; }

 107 
 108   // Accessors
 109   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 110 
 111   // Returns true if "word_size" is available in the VirtualSpace
 112   bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
 113 
 114   MetaWord* top() const { return _top; }
 115   void inc_top(size_t word_size) { _top += word_size; }
 116 
 117   uintx container_count() { return _container_count; }
 118   void inc_container_count();
 119   void dec_container_count();
 120 
 121   // used and capacity in this single entry in the list
 122   size_t used_words_in_vs() const;
 123   size_t capacity_words_in_vs() const;
 124 
 125   bool initialize();
 126 
 127   // get space from the virtual space
 128   Metachunk* take_from_committed(size_t chunk_word_size);
 129 
 130   // Allocate a chunk from the virtual space and return it.
 131   Metachunk* get_chunk_vs(size_t chunk_word_size);
 132 
 133   // Expands the committed space by at least min_words words.
 134   bool expand_by(size_t min_words, size_t preferred_words);
 135 
 136   // In preparation for deleting this node, remove all the chunks
 137   // in the node from any freelist.
 138   void purge(ChunkManager* chunk_manager);
 139 
 140   // If an allocation doesn't fit in the current node, a new node is created.
 141   // Allocate chunks out of the remaining committed space in this node
 142   // to avoid wasting that memory.
 143   // This always works out because all chunk sizes are multiples of
 144   // the smallest chunk size.
 145   void retire(ChunkManager* chunk_manager);
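       // (Illustrative arithmetic, not the actual chunk size constants: if the smallest
       //  chunk is 1 unit and larger chunks are 4 and 64 units, a committed remainder of
       //  e.g. 7 units can always be retired as one 4-unit chunk plus three 1-unit chunks,
       //  so no committed memory is left stranded.)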
 146 

 147   void print_on(outputStream* st) const                 { print_on(st, K); }
 148   void print_on(outputStream* st, size_t scale) const;
 149   void print_map(outputStream* st, bool is_class) const;
 150 
 151   // Debug support
 152   DEBUG_ONLY(void mangle();)
 153   // Verify counters and basic structure. Slow mode: verify all chunks in depth and occupancy map.
 154   DEBUG_ONLY(void verify(bool slow);)
 155   // Verify that all free chunks in this node are ideally merged
 156   // (there should not be multiple small chunks where a large chunk could exist).
 157   DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
 158 
 159 };

 160 
 161 } // namespace metaspace
 162 
 163 #endif // SHARE_MEMORY_METASPACE_VIRTUALSPACENODE_HPP


   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_MEMORY_METASPACE_VIRTUALSPACENODE_HPP
  26 #define SHARE_MEMORY_METASPACE_VIRTUALSPACENODE_HPP
  27 
  28 #include "memory/metaspace/constants.hpp"
  29 #include "memory/metaspace/counter.hpp"
  30 #include "memory/metaspace/chunkTree.hpp"
  31 #include "memory/metaspace/commitMask.hpp"
  32 #include "memory/virtualspace.hpp"
  33 #include "memory/memRegion.hpp"
  34 #include "utilities/debug.hpp"
  35 #include "utilities/bitMap.hpp"
  36 #include "utilities/globalDefinitions.hpp"
  37 
  38 
  39 class outputStream;
  40 
  41 namespace metaspace {
  42 
  43 class CommitLimiter;
  44 
  45 // VirtualSpaceNode manages a single address range of the Metaspace.
  46 //
  47 // That address range may contain interleaved committed and uncommitted
  48 // regions. It keeps track of which regions have been committed and offers
  49 // functions to commit and uncommit regions.
  50 //
  51 // It allocates and hands out memory ranges, starting at the bottom.
  52 //
  53 // Address range must be aligned to root chunk size.
  54 //
  55 class VirtualSpaceNode : public CHeapObj<mtClass> {

  56 
  57   // Link to next VirtualSpaceNode
  58   VirtualSpaceNode* _next;
  59 
  60   ReservedSpace _rs;
  61 
  62   // Start pointer of the area.
  63   MetaWord* const _base;
  64 
  65   // Size, in words, of the whole node
  66   const size_t _word_size;
  67 
  68   // Size, in words, of the range of this node which has been handed out in
  69   // the form of chunks.
  70   size_t _used_words;
  71 
  72   // The bitmap describing the commit state of the region:
  73   // Each bit covers a region of 64K (see constants::commit_granule_size).
  74   CommitMask _commit_mask;
  75 
  76   // An array of chunk trees. Each one describes fragmentation inside the associated root chunk.
  77   ChunkTreeArray _chunk_tree_array;
  78 
  79   // Limiter object to ask before expanding the committed size of this node.
  80   CommitLimiter* const _commit_limiter;
  81 
  82   // Points to external size counters which we increase/decrease when we commit/uncommit
  83   // space in this node.
  84   SizeCounter* const _total_reserved_words_counter;
  85   SizeCounter* const _total_committed_words_counter;
  86 
  87   /// committing, uncommitting ///
  88 
  89   // Given a pointer into this node, calculate the start of the commit granule
  90   // the pointer points into.
  91   MetaWord* calc_start_of_granule(MetaWord* p) const {
  92     DEBUG_ONLY(check_pointer(p));
  93     return align_down(p, constants::commit_granule_bytes);
  94   }
  95 
  96   // Given an address range, ensure it is committed.
  97   //
  98   // The range has to be aligned to granule size.
  99   //
 100   // Function will:
 101   // - check how many granules in that region are uncommitted; if all are committed, it
 102   //    returns true immediately.
 103   // - check if committing those uncommitted granules would bring us over the commit limit
 104   //    (GC threshold, MaxMetaspaceSize); if so, it returns false.
 105   // - commit the memory.
 106   // - mark the range as committed in the commit mask.
 107   //
 108   // Returns true on success, false if it hit a commit limit.
 109   bool commit_range(MetaWord* p, size_t word_size);
 110 
 111   //// creation ////
 112 
 113   // Create a new empty node spanning the given reserved space.
 114   VirtualSpaceNode(ReservedSpace rs,
 115                    CommitLimiter* limiter,
 116                    SizeCounter* reserve_counter,
 117                    SizeCounter* commit_counter);
 118 
 119   MetaWord* base() const        { return _base; }
 120 
 121   // Reserved size of the whole node.
 122   size_t word_size() const      { return _word_size; }
 123 
 124 public:
 125 
 126   // Create a node of a given size
 127   static VirtualSpaceNode* create_node(size_t word_size,
 128                                        CommitLimiter* limiter,
 129                                        SizeCounter* reserve_counter,
 130                                        SizeCounter* commit_counter);
 131 
 132   // Create a node over an existing space
 133   static VirtualSpaceNode* create_node(ReservedSpace rs,
 134                                        CommitLimiter* limiter,
 135                                        SizeCounter* reserve_counter,
 136                                        SizeCounter* commit_counter);
 137 
 138   ~VirtualSpaceNode();
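
       // Illustrative usage sketch (not part of this change; commit_limiter and the two
       // counters are names assumed to be owned by the caller, e.g. a VirtualSpaceList):
       //
       //   VirtualSpaceNode* node =
       //       VirtualSpaceNode::create_node(word_size, &commit_limiter,
       //                                     &reserved_words_counter, &committed_words_counter);
       //   ...
       //   delete node;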
 139 
 140   //// Chunk allocation, splitting, merging /////

 141 
 142   // Allocate a root chunk from this node. Will fail and return NULL
 143   // if the node is full.
 144   // Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
 145   // Hence, before using this chunk, it must be committed.
 146   // Also, no limits are checked, since no committing takes place.
 147   Metachunk* allocate_root_chunk();
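
       // Illustrative sketch (not part of this change): a freshly allocated root chunk is
       // only reserved, so it must be committed before first use. base() and word_size()
       // stand in for whatever accessors Metachunk actually offers:
       //
       //   Metachunk* c = node->allocate_root_chunk();
       //   if (c != NULL &&
       //       node->ensure_range_is_committed(c->base(), c->word_size())) {
       //     // chunk memory is committed and ready for use
       //   }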
 148 
 149   // Given a chunk c, split it recursively until you get a chunk of the given target_level.
 150   //
 151   // The original chunk must not be part of a freelist.
 152   //
 153   // Returns a pointer to the resulting chunk; the split-off chunks are returned in the splinters array.
 154   //
 155   // Returns NULL if the chunk cannot be split at least once.
 156   Metachunk* split(chklvl_t target_level, Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]);
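
       // Illustrative sketch (not part of this change; variable names are assumptions).
       // Splitting a chunk down to a target level and collecting the split-off chunks,
       // e.g. for later registration with a freelist:
       //
       //   Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS] = { NULL };
       //   Metachunk* result = node->split(target_level, c, splinters);
       //   if (result != NULL) {
       //     // result has target_level; hand the non-NULL splinters over to the freelists
       //   }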
 157 
 158   // Given a chunk, attempt to merge it recursively with its neighboring chunks.
 159   //
 160   // If successful (merged at least once), returns address of
 161   // the merged chunk; NULL otherwise.
 162   //
 163   // The merged chunks are removed from their freelist; the number of merged chunks is
 164   // returned, split by level, in the num_merged array. Note that these numbers do not
 165   // include the original chunk.
 166   //
 167   // !!! Please note that if this method returns a non-NULL value, the
 168   // original chunk will be invalid and should not be accessed anymore! !!!
 169   Metachunk* merge(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]);
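
       // Illustrative sketch (not part of this change): after a successful merge the
       // original chunk pointer is dead; only the returned chunk may be used.
       //
       //   int num_merged[chklvl::NUM_CHUNK_LEVELS] = { 0 };
       //   Metachunk* merged = node->merge(c, num_merged);
       //   if (merged != NULL) {
       //     c = NULL;  // c was merged away; continue with "merged"
       //   }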
 170 
 171 
 172   /// misc /////
 173 
 174   // Returns size, in words, of the used space in this node alone.
 175   // (Notes:
 176   //  - This is the space handed out to the ChunkManager, so it is "used" from the viewpoint of this node,
 177   //    but not necessarily used for Metadata.
 178   //  - This may or may not be committed memory.)
 179   size_t used_words() const             { return _used_words; }
 180 
 181   // Returns size, in words, of how much space is left in this node alone.
 182   size_t free_words() const             { return _word_size - _used_words; }
 183 
 184   // Returns size, in words, of committed space in this node alone.
 185   size_t committed_words() const;
 186 
 187   //// Committing/uncommitting memory /////
 188 
 189   // Given an address range, ensure it is committed.
 190   //
 191   // The range does not have to be aligned to granule size. However, the function will always commit
 192   // whole granules.
 193   //
 194   // Function will:
 195   // - check how many granules in that region are uncommitted; if all are committed, it
 196   //    returns true immediately.
 197   // - check if committing those uncommitted granules would bring us over the commit limit
 198   //    (GC threshold, MaxMetaspaceSize); if so, it returns false.
 199   // - commit the memory.
 200   // - mark the range as committed in the commit mask.
 201   //
 202   // Returns true on success, false if it hit a commit limit.
 203   bool ensure_range_is_committed(MetaWord* p, size_t word_size);
 204 
 205   // Given an address range (which has to be aligned to commit granule size):
 206   //  - uncommit it
 207   //  - mark it as uncommitted in the commit mask
 208   bool uncommit_range(MetaWord* p, size_t word_size);
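
       // Illustrative sketch (not part of this change; p, word_size and the aligned_*
       // variants are placeholders): committing an arbitrary range and later giving the
       // memory back. uncommit_range expects a range already aligned to commit granules:
       //
       //   if (!node->ensure_range_is_committed(p, word_size)) {
       //     // hit the GC threshold or MaxMetaspaceSize; caller must handle the failure
       //   }
       //   ...
       //   node->uncommit_range(aligned_p, aligned_word_size);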
 209 
 210   //// List stuff ////
 211   VirtualSpaceNode* next() const        { return _next; }
 212   void set_next(VirtualSpaceNode* vsn)  { _next = vsn; }
 213 
 214 
 215   /// Debug stuff ////
 216 
 217   // Prints a description of this node.
 218   void print_on(outputStream* st) const                   { print_on(st, K); }
 219   void print_on(outputStream* st, size_t scale) const;

 220 
 221   // Returns true if the given pointer lies within the used area of this node.
 222   bool contains(const MetaWord* p) const {
 223     return p >= _base && p < _base + _used_words;
 224   }
 225 
 226 #ifdef ASSERT
 227   void check_pointer(const MetaWord* p) const {
 228     assert(contains(p), "invalid pointer");
 229   }
       // Verify counters and basic structure. Slow mode: verify all chunks in depth.
 230   void verify(bool slow) const;
 231 #endif
 232 
 233 };
 234 
 235 
 236 } // namespace metaspace
 237 
 238 #endif // SHARE_MEMORY_METASPACE_VIRTUALSPACENODE_HPP