1 /* 2 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 #ifndef SHARE_VM_MEMORY_METASPACE_HPP 25 #define SHARE_VM_MEMORY_METASPACE_HPP 26 27 #include "memory/allocation.hpp" 28 #include "memory/memRegion.hpp" 29 #include "runtime/virtualspace.hpp" 30 #include "utilities/exceptions.hpp" 31 32 // Metaspace 33 // 34 // Metaspaces are Arenas for the VM's metadata. 
// They are allocated one per class loader object, and one for the null
// bootstrap class loader.
// Eventually the bootstrap loader's metaspace will be split into a read-only
// section and a read-write section: written when DumpSharedSpaces is set and
// read back when UseSharedSpaces is set.
//
// block X ---+       +-------------------+
//            |       |  Virtualspace     |
//            |       |                   |
//            |       |                   |
//            |       |-------------------|
//            |       || Chunk            |
//            |       ||                  |
//            |       ||----------        |
//            +------>||| block 0 |       |
//                    ||----------        |
//                    ||| block 1 |       |
//                    ||----------        |
//                    ||                  |
//                    |-------------------|
//                    |                   |
//                    |                   |
//                    +-------------------+
//

// Forward declarations; the full definitions live in their own headers.
class ChunkManager;
class ClassLoaderData;
class Metablock;
class Metachunk;
class MetaWord;
class Mutex;
class outputStream;
class SpaceManager;
class VirtualSpaceList;

// Metaspaces each have a SpaceManager and allocations
// are done by the SpaceManager.  Allocations are done
// out of the current Metachunk.  When the current Metachunk
// is exhausted, the SpaceManager gets a new one from
// the current VirtualSpace.  When the VirtualSpace is exhausted
// the SpaceManager gets a new one.  The SpaceManager
// also manages freelists of available Chunks.
//
// Currently the space manager maintains the list of
// virtual spaces and the list of chunks in use.  Its
// allocate() method returns a block for use as a
// quantum of metadata.
81 82 class Metaspace : public CHeapObj<mtClass> { 83 friend class VMStructs; 84 friend class SpaceManager; 85 friend class VM_CollectForMetadataAllocation; 86 friend class MetaspaceGC; 87 friend class MetaspaceAux; 88 89 public: 90 enum MetadataType {ClassType = 0, 91 NonClassType = ClassType + 1, 92 MetadataTypeCount = ClassType + 2 93 }; 94 enum MetaspaceType { 95 StandardMetaspaceType, 96 BootMetaspaceType, 97 ROMetaspaceType, 98 ReadWriteMetaspaceType, 99 AnonymousMetaspaceType, 100 ReflectionMetaspaceType 101 }; 102 103 private: 104 void initialize(Mutex* lock, MetaspaceType type); 105 106 Metachunk* get_initialization_chunk(MetadataType mdtype, 107 size_t chunk_word_size, 108 size_t chunk_bunch); 109 110 // Align up the word size to the allocation word size 111 static size_t align_word_size_up(size_t); 112 113 // Aligned size of the metaspace. 114 static size_t _class_metaspace_size; 115 116 static size_t class_metaspace_size() { 117 return _class_metaspace_size; 118 } 119 static void set_class_metaspace_size(size_t metaspace_size) { 120 _class_metaspace_size = metaspace_size; 121 } 122 123 static size_t _first_chunk_word_size; 124 static size_t _first_class_chunk_word_size; 125 126 SpaceManager* _vsm; 127 SpaceManager* vsm() const { return _vsm; } 128 129 SpaceManager* _class_vsm; 130 SpaceManager* class_vsm() const { return _class_vsm; } 131 132 // Allocate space for metadata of type mdtype. This is space 133 // within a Metachunk and is used by 134 // allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS) 135 // which returns a Metablock. 
136 MetaWord* allocate(size_t word_size, MetadataType mdtype); 137 138 // Virtual Space lists for both classes and other metadata 139 static VirtualSpaceList* _space_list; 140 static VirtualSpaceList* _class_space_list; 141 142 static ChunkManager* _chunk_manager_metadata; 143 static ChunkManager* _chunk_manager_class; 144 145 public: 146 static VirtualSpaceList* space_list() { return _space_list; } 147 static VirtualSpaceList* class_space_list() { return _class_space_list; } 148 static VirtualSpaceList* get_space_list(MetadataType mdtype) { 149 assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype"); 150 return mdtype == ClassType ? class_space_list() : space_list(); 151 } 152 153 static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; } 154 static ChunkManager* chunk_manager_class() { return _chunk_manager_class; } 155 static ChunkManager* get_chunk_manager(MetadataType mdtype) { 156 assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype"); 157 return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata(); 158 } 159 160 private: 161 // This is used by DumpSharedSpaces only, where only _vsm is used. So we will 162 // maintain a single list for now. 163 void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size); 164 165 #ifdef _LP64 166 static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base); 167 168 // Returns true if can use CDS with metaspace allocated as specified address. 
169 static bool can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base); 170 171 static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base); 172 173 static void initialize_class_space(ReservedSpace rs); 174 #endif 175 176 class AllocRecord : public CHeapObj<mtClass> { 177 public: 178 AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size) 179 : _next(NULL), _ptr(ptr), _type(type), _byte_size(byte_size) {} 180 AllocRecord *_next; 181 address _ptr; 182 MetaspaceObj::Type _type; 183 int _byte_size; 184 }; 185 186 AllocRecord * _alloc_record_head; 187 AllocRecord * _alloc_record_tail; 188 189 public: 190 191 Metaspace(Mutex* lock, MetaspaceType type); 192 ~Metaspace(); 193 194 // Initialize globals for Metaspace 195 static void global_initialize(); 196 197 static size_t first_chunk_word_size() { return _first_chunk_word_size; } 198 static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; } 199 200 char* bottom() const; 201 size_t used_words_slow(MetadataType mdtype) const; 202 size_t free_words_slow(MetadataType mdtype) const; 203 size_t capacity_words_slow(MetadataType mdtype) const; 204 205 size_t used_bytes_slow(MetadataType mdtype) const; 206 size_t capacity_bytes_slow(MetadataType mdtype) const; 207 208 static Metablock* allocate(ClassLoaderData* loader_data, size_t word_size, 209 bool read_only, MetaspaceObj::Type type, TRAPS); 210 void deallocate(MetaWord* ptr, size_t byte_size, bool is_class); 211 212 MetaWord* expand_and_allocate(size_t size, 213 MetadataType mdtype); 214 215 static bool contains(const void *ptr); 216 void dump(outputStream* const out) const; 217 218 // Free empty virtualspaces 219 static void purge(MetadataType mdtype); 220 static void purge(); 221 222 void print_on(outputStream* st) const; 223 // Debugging support 224 void verify(); 225 226 class AllocRecordClosure : public StackObj { 227 public: 228 virtual void doit(address ptr, MetaspaceObj::Type type, 
int byte_size) = 0; 229 }; 230 231 void iterate(AllocRecordClosure *closure); 232 233 // Return TRUE only if UseCompressedClassPointers is True and DumpSharedSpaces is False. 234 static bool using_class_space() { 235 return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces); 236 } 237 238 static bool is_class_space_allocation(MetadataType mdType) { 239 return mdType == ClassType && using_class_space(); 240 } 241 }; 242 243 class MetaspaceAux : AllStatic { 244 static size_t free_chunks_total_words(Metaspace::MetadataType mdtype); 245 246 // These methods iterate over the classloader data graph 247 // for the given Metaspace type. These are slow. 248 static size_t used_bytes_slow(Metaspace::MetadataType mdtype); 249 static size_t free_bytes_slow(Metaspace::MetadataType mdtype); 250 static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype); 251 static size_t capacity_bytes_slow(); 252 253 // Running sum of space in all Metachunks that has been 254 // allocated to a Metaspace. This is used instead of 255 // iterating over all the classloaders. One for each 256 // type of Metadata 257 static size_t _allocated_capacity_words[Metaspace:: MetadataTypeCount]; 258 // Running sum of space in all Metachunks that have 259 // are being used for metadata. One for each 260 // type of Metadata. 261 static size_t _allocated_used_words[Metaspace:: MetadataTypeCount]; 262 263 public: 264 // Decrement and increment _allocated_capacity_words 265 static void dec_capacity(Metaspace::MetadataType type, size_t words); 266 static void inc_capacity(Metaspace::MetadataType type, size_t words); 267 268 // Decrement and increment _allocated_used_words 269 static void dec_used(Metaspace::MetadataType type, size_t words); 270 static void inc_used(Metaspace::MetadataType type, size_t words); 271 272 // Total of space allocated to metadata in all Metaspaces. 
273 // This sums the space used in each Metachunk by 274 // iterating over the classloader data graph 275 static size_t used_bytes_slow() { 276 return used_bytes_slow(Metaspace::ClassType) + 277 used_bytes_slow(Metaspace::NonClassType); 278 } 279 280 // Used by MetaspaceCounters 281 static size_t free_chunks_total_words(); 282 static size_t free_chunks_total_bytes(); 283 static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype); 284 285 static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) { 286 return _allocated_capacity_words[mdtype]; 287 } 288 static size_t allocated_capacity_words() { 289 return allocated_capacity_words(Metaspace::NonClassType) + 290 allocated_capacity_words(Metaspace::ClassType); 291 } 292 static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) { 293 return allocated_capacity_words(mdtype) * BytesPerWord; 294 } 295 static size_t allocated_capacity_bytes() { 296 return allocated_capacity_words() * BytesPerWord; 297 } 298 299 static size_t allocated_used_words(Metaspace::MetadataType mdtype) { 300 return _allocated_used_words[mdtype]; 301 } 302 static size_t allocated_used_words() { 303 return allocated_used_words(Metaspace::NonClassType) + 304 allocated_used_words(Metaspace::ClassType); 305 } 306 static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) { 307 return allocated_used_words(mdtype) * BytesPerWord; 308 } 309 static size_t allocated_used_bytes() { 310 return allocated_used_words() * BytesPerWord; 311 } 312 313 static size_t free_bytes(); 314 static size_t free_bytes(Metaspace::MetadataType mdtype); 315 316 static size_t reserved_bytes(Metaspace::MetadataType mdtype); 317 static size_t reserved_bytes() { 318 return reserved_bytes(Metaspace::ClassType) + 319 reserved_bytes(Metaspace::NonClassType); 320 } 321 322 static size_t committed_bytes(Metaspace::MetadataType mdtype); 323 static size_t committed_bytes() { 324 return committed_bytes(Metaspace::ClassType) + 325 
committed_bytes(Metaspace::NonClassType); 326 } 327 328 static size_t min_chunk_size_words(); 329 static size_t min_chunk_size_bytes() { 330 return min_chunk_size_words() * BytesPerWord; 331 } 332 333 // Print change in used metadata. 334 static void print_metaspace_change(size_t prev_metadata_used); 335 static void print_on(outputStream * out); 336 static void print_on(outputStream * out, Metaspace::MetadataType mdtype); 337 338 static void print_class_waste(outputStream* out); 339 static void print_waste(outputStream* out); 340 static void dump(outputStream* out); 341 static void verify_free_chunks(); 342 // Checks that the values returned by allocated_capacity_bytes() and 343 // capacity_bytes_slow() are the same. 344 static void verify_capacity(); 345 static void verify_used(); 346 static void verify_metrics(); 347 }; 348 349 // Metaspace are deallocated when their class loader are GC'ed. 350 // This class implements a policy for inducing GC's to recover 351 // Metaspaces. 352 353 class MetaspaceGC : AllStatic { 354 355 // The current high-water-mark for inducing a GC. When 356 // the capacity of all space in the virtual lists reaches this value, 357 // a GC is induced and the value is increased. This should be changed 358 // to the space actually used for allocations to avoid affects of 359 // fragmentation losses to partially used chunks. Size is in words. 360 static size_t _capacity_until_GC; 361 362 // After a GC is done any allocation that fails should try to expand 363 // the capacity of the Metaspaces. This flag is set during attempts 364 // to allocate in the VMGCOperation that does the GC. 365 static bool _expand_after_GC; 366 367 // For a CMS collection, signal that a concurrent collection should 368 // be started. 
369 static bool _should_concurrent_collect; 370 371 static uint _shrink_factor; 372 373 static void set_capacity_until_GC(size_t v) { _capacity_until_GC = v; } 374 375 static size_t shrink_factor() { return _shrink_factor; } 376 void set_shrink_factor(uint v) { _shrink_factor = v; } 377 378 public: 379 380 static size_t capacity_until_GC() { return _capacity_until_GC; } 381 static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; } 382 static void dec_capacity_until_GC(size_t v) { 383 _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0; 384 } 385 static bool expand_after_GC() { return _expand_after_GC; } 386 static void set_expand_after_GC(bool v) { _expand_after_GC = v; } 387 388 static bool should_concurrent_collect() { return _should_concurrent_collect; } 389 static void set_should_concurrent_collect(bool v) { 390 _should_concurrent_collect = v; 391 } 392 393 // The amount to increase the high-water-mark (_capacity_until_GC) 394 static size_t delta_capacity_until_GC(size_t word_size); 395 396 // It is expected that this will be called when the current capacity 397 // has been used and a GC should be considered. 398 static bool should_expand(VirtualSpaceList* vsl, size_t word_size); 399 400 // Calculate the new high-water mark at which to induce 401 // a GC. 402 static void compute_new_size(); 403 }; 404 405 #endif // SHARE_VM_MEMORY_METASPACE_HPP