src/share/vm/memory/metaspace.cpp




  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "gc_interface/collectedHeap.hpp"
  26 #include "memory/binaryTreeDictionary.hpp"
  27 #include "memory/freeList.hpp"
  28 #include "memory/collectorPolicy.hpp"
  29 #include "memory/filemap.hpp"
  30 #include "memory/freeList.hpp"
  31 #include "memory/metablock.hpp"
  32 #include "memory/metachunk.hpp"
  33 #include "memory/metaspace.hpp"
  34 #include "memory/metaspaceShared.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.hpp"
  37 #include "runtime/globals.hpp"

  38 #include "runtime/mutex.hpp"
  39 #include "runtime/orderAccess.hpp"
  40 #include "services/memTracker.hpp"
  41 #include "utilities/copy.hpp"
  42 #include "utilities/debug.hpp"
  43 
  44 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
  45 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
  46 // Set this constant to true to enable slow integrity checking of
  47 // the free chunk lists.
  48 const bool metaspace_slow_verify = false;
  49 
  50 // Parameters for stress mode testing
  51 const uint metadata_deallocate_a_lot_block = 10;
  52 const uint metadata_deallocate_a_lock_chunk = 3;
  53 size_t const allocation_from_dictionary_limit = 64 * K;
  54 
  55 MetaWord* last_allocated = 0;
  56 


  57 // Used in declarations in SpaceManager and ChunkManager
  58 enum ChunkIndex {
  59   ZeroIndex = 0,
  60   SpecializedIndex = ZeroIndex,
  61   SmallIndex = SpecializedIndex + 1,
  62   MediumIndex = SmallIndex + 1,
  63   HumongousIndex = MediumIndex + 1,
  64   NumberOfFreeLists = 3,
  65   NumberOfInUseLists = 4
  66 };
  67 
  68 enum ChunkSizes {    // in words.
  69   ClassSpecializedChunk = 128,
  70   SpecializedChunk = 128,
  71   ClassSmallChunk = 256,
  72   SmallChunk = 512,
  73   ClassMediumChunk = 4 * K,
  74   MediumChunk = 8 * K,
  75   HumongousChunkGranularity = 8
  76 };
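
For illustration, here is a minimal sketch of how a chunk's word size maps onto these indices, mirroring the ChunkManager::list_index() switch excerpted further down. The helper name map_chunk_index is hypothetical, and only the non-class sizes are shown, since SpecializedChunk and ClassSpecializedChunk share the value 128 (which is why the real switch carries the "Need branch for ClassSpecializedChunk" assert):

    // Hypothetical sketch of the size-class mapping; sizes that match no
    // fixed chunk size fall through to HumongousIndex.
    ChunkIndex map_chunk_index(size_t word_size) {
      switch (word_size) {
        case SpecializedChunk: return SpecializedIndex;  // 128 words
        case SmallChunk:       return SmallIndex;        // 512 words
        case MediumChunk:      return MediumIndex;       // 8K words
        default:               return HumongousIndex;    // anything larger
      }
    }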


 244   }
 245 }
 246 
 247   void print_on(outputStream* st) const;
 248 };
 249 
 250 class VirtualSpaceNode : public CHeapObj<mtClass> {
 251   friend class VirtualSpaceList;
 252 
 253   // Link to next VirtualSpaceNode
 254   VirtualSpaceNode* _next;
 255 
 256   // total in the VirtualSpace
 257   MemRegion _reserved;
 258   ReservedSpace _rs;
 259   VirtualSpace _virtual_space;
 260   MetaWord* _top;
 261   // count of chunks contained in this VirtualSpace
 262   uintx _container_count;
 263 
 264   // Convenience functions for logical bottom and end
 265   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 266   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 267 
 268   // Convenience functions to access the _virtual_space
 269   char* low()  const { return virtual_space()->low(); }
 270   char* high() const { return virtual_space()->high(); }
 271 
 272   // The first Metachunk will be allocated at the bottom of the
 273   // VirtualSpace
 274   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 275 
 276   void inc_container_count();
 277 #ifdef ASSERT
 278   uint container_count_slow();
 279 #endif
 280 
 281  public:
 282 
 283   VirtualSpaceNode(size_t byte_size);
 284   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 285   ~VirtualSpaceNode();
 286 




 287   // address of next available space in _virtual_space;
 288   // Accessors
 289   VirtualSpaceNode* next() { return _next; }
 290   void set_next(VirtualSpaceNode* v) { _next = v; }
 291 
 292   void set_reserved(MemRegion const v) { _reserved = v; }
 293   void set_top(MetaWord* v) { _top = v; }
 294 
 295   // Accessors
 296   MemRegion* reserved() { return &_reserved; }
 297   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 298 
 299   // Returns true if "word_size" is available in the VirtualSpace
 300   bool is_available(size_t word_size) { return _top + word_size <= end(); }
 301 
 302   MetaWord* top() const { return _top; }
 303   void inc_top(size_t word_size) { _top += word_size; }
 304 
 305   uintx container_count() { return _container_count; }
 306   void dec_container_count();
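
A VirtualSpaceNode hands out memory by bumping _top. A minimal sketch of that step, using only the accessors declared above (the free function allocate_from_node is hypothetical):

    // Hypothetical sketch: carve word_size words out of a node by bumping
    // its top pointer; returns NULL when the node cannot fit the request.
    MetaWord* allocate_from_node(VirtualSpaceNode* node, size_t word_size) {
      if (!node->is_available(word_size)) {
        return NULL;                   // _top + word_size would pass end()
      }
      MetaWord* result = node->top();  // next free word
      node->inc_top(word_size);        // bump the high-water mark
      return result;
    }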


1296 }
1297 
1298 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
1299 
1300   // If the user wants a limit, impose one.
1301   // The reason for someone using this flag is to limit reserved space.  So
1302   // for non-class virtual space, compare against virtual spaces that are reserved.
1303   // For class virtual space, we only compare against the committed space, not
1304   // reserved space, because this is a larger space prereserved for compressed
1305   // class pointers.
1306   if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
1307     size_t real_allocated = Metaspace::space_list()->virtual_space_total() +
1308               MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
1309     if (real_allocated >= MaxMetaspaceSize) {
1310       return false;
1311     }
1312   }
1313 
1314   // Class virtual space should always be expanded.  Call GC for the other
1315   // metadata virtual space.
1316   if (vsl == Metaspace::class_space_list()) return true;

1317 
1318   // If this is part of an allocation after a GC, expand
1319   // unconditionally.
1320   if (MetaspaceGC::expand_after_GC()) {
1321     return true;
1322   }
1323 
1324 
1325   // If the capacity is below the minimum capacity, allow the
1326   // expansion.  Also set the high-water-mark (capacity_until_GC)
1327   // to that minimum capacity so that a GC will not be induced
1328   // until that minimum capacity is exceeded.
1329   size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
1330   size_t metaspace_size_bytes = MetaspaceSize;
1331   if (committed_capacity_bytes < metaspace_size_bytes ||
1332       capacity_until_GC() == 0) {
1333     set_capacity_until_GC(metaspace_size_bytes);
1334     return true;
1335   } else {
1336     if (committed_capacity_bytes < capacity_until_GC()) {


2240              "Need branch for ClassSpecializedChunk");
2241       return SpecializedIndex;
2242     case SmallChunk:
2243     case ClassSmallChunk:
2244       return SmallIndex;
2245     case MediumChunk:
2246     case ClassMediumChunk:
2247       return MediumIndex;
2248     default:
2249       assert(size > MediumChunk || size > ClassMediumChunk,
2250              "Not a humongous chunk");
2251       return HumongousIndex;
2252   }
2253 }
2254 
2255 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2256   assert_lock_strong(_lock);
2257   size_t raw_word_size = get_raw_word_size(word_size);
2258   size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2259   assert(raw_word_size >= min_size,
2260     err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
2261   block_freelists()->return_block(p, raw_word_size);
2262 }
2263 
2264 // Adds a chunk to the list of chunks in use.
2265 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2266 
2267   assert(new_chunk != NULL, "Should not be NULL");
2268   assert(new_chunk->next() == NULL, "Should not be on a list");
2269 
2270   new_chunk->reset_empty();
2271 
2272   // Find the correct list and set the current
2273   // chunk for that list.
2274   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2275 
2276   if (index != HumongousIndex) {
2277     set_current_chunk(new_chunk);
2278     new_chunk->set_next(chunks_in_use(index));
2279     set_chunks_in_use(index, new_chunk);
2280   } else {


2357   }
2358 #endif
2359   // Is there space in the current chunk?
2360   MetaWord* result = NULL;
2361 
2362   // For DumpSharedSpaces, only allocate out of the current chunk which is
2363   // never null because we gave it the size we wanted.   Caller reports out
2364   // of memory if this returns null.
2365   if (DumpSharedSpaces) {
2366     assert(current_chunk() != NULL, "should never happen");
2367     inc_used_metrics(word_size);
2368     return current_chunk()->allocate(word_size); // caller handles null result
2369   }
2370   if (current_chunk() != NULL) {
2371     result = current_chunk()->allocate(word_size);
2372   }
2373 
2374   if (result == NULL) {
2375     result = grow_and_allocate(word_size);
2376   }
2377   if (result > 0) {
2378     inc_used_metrics(word_size);
2379     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2380            "Head of the list is being allocated");
2381   }
2382 
2383   return result;
2384 }
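
The allocation path above reduces to: try the current chunk, and only grow when it cannot satisfy the request. A condensed, self-contained sketch of that policy (Chunk and grow_fn are hypothetical stand-ins for Metachunk and SpaceManager::grow_and_allocate()):

    // Condensed sketch of the fast-path/slow-path split above.
    struct Chunk {
      MetaWord* _top;
      MetaWord* _end;
      MetaWord* allocate(size_t word_size) {
        if (_top + word_size > _end) return NULL;   // chunk exhausted
        MetaWord* p = _top;
        _top += word_size;                          // bump within chunk
        return p;
      }
    };

    MetaWord* allocate_sketch(Chunk* current, size_t word_size,
                              MetaWord* (*grow_fn)(size_t)) {
      MetaWord* result = NULL;
      if (current != NULL) {
        result = current->allocate(word_size);      // fast path
      }
      if (result == NULL) {
        result = grow_fn(word_size);                // slow path: new chunk
      }
      return result;
    }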
2385 
2386 void SpaceManager::verify() {
2387   // If there are blocks in the dictionary, then
2388   // verification of chunks does not work since
2389   // being in the dictionary alters a chunk.
2390   if (block_freelists()->total_size() == 0) {
2391     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2392       Metachunk* curr = chunks_in_use(i);
2393       while (curr != NULL) {
2394         curr->verify();
2395         verify_chunk_size(curr);
2396         curr = curr->next();
2397       }


2461   for (ChunkIndex index = ZeroIndex;
2462        index < NumberOfInUseLists;
2463        index = next_chunk_index(index)) {
2464     for (Metachunk* curr = chunks_in_use(index);
2465          curr != NULL;
2466          curr = curr->next()) {
2467       curr->mangle();
2468     }
2469   }
2470 }
2471 #endif // PRODUCT
2472 
2473 // MetaspaceAux
2474 
2475 
2476 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
2477 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
2478 
2479 size_t MetaspaceAux::free_bytes() {
2480   size_t result = 0;
2481   if (Metaspace::class_space_list() != NULL) {

2482     result = result + Metaspace::class_space_list()->free_bytes();
2483   }
2484   if (Metaspace::space_list() != NULL) {
2485     result = result + Metaspace::space_list()->free_bytes();
2486   }
2487   return result;
2488 }
2489 
2490 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2491   assert_lock_strong(SpaceManager::expand_lock());
2492   assert(words <= allocated_capacity_words(mdtype),
2493     err_msg("About to decrement below 0: words " SIZE_FORMAT
2494             " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
2495             words, mdtype, allocated_capacity_words(mdtype)));
2496   _allocated_capacity_words[mdtype] -= words;
2497 }
2498 
2499 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2500   assert_lock_strong(SpaceManager::expand_lock());
2501   // Needs to be atomic


2532     if (msp != NULL) {
2533       used += msp->used_words_slow(mdtype);
2534     }
2535   }
2536   return used * BytesPerWord;
2537 }
2538 
2539 size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
2540   size_t free = 0;
2541   ClassLoaderDataGraphMetaspaceIterator iter;
2542   while (iter.repeat()) {
2543     Metaspace* msp = iter.get_next();
2544     if (msp != NULL) {
2545       free += msp->free_words(mdtype);
2546     }
2547   }
2548   return free * BytesPerWord;
2549 }
2550 
2551 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {



2552   // Don't count the space in the freelists.  That space will be
2553   // added to the capacity calculation as needed.
2554   size_t capacity = 0;
2555   ClassLoaderDataGraphMetaspaceIterator iter;
2556   while (iter.repeat()) {
2557     Metaspace* msp = iter.get_next();
2558     if (msp != NULL) {
2559       capacity += msp->capacity_words_slow(mdtype);
2560     }
2561   }
2562   return capacity * BytesPerWord;
2563 }
2564 
2565 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
2566   size_t reserved = (mdtype == Metaspace::ClassType) ?
2567                        Metaspace::class_space_list()->virtual_space_total() :
2568                        Metaspace::space_list()->virtual_space_total();
2569   return reserved * BytesPerWord;


2570 }
2571 
2572 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
2573 
2574 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
2575   ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
2576                             Metaspace::class_space_list()->chunk_manager() :
2577                             Metaspace::space_list()->chunk_manager();
2578   chunk->slow_verify();
2579   return chunk->free_chunks_total();
2580 }
2581 
2582 size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
2583   return free_chunks_total(mdtype) * BytesPerWord;
2584 }
2585 
2586 size_t MetaspaceAux::free_chunks_total() {
2587   return free_chunks_total(Metaspace::ClassType) +
2588          free_chunks_total(Metaspace::NonClassType);
2589 }
2590 
2591 size_t MetaspaceAux::free_chunks_total_in_bytes() {
2592   return free_chunks_total() * BytesPerWord;
2593 }
2594 


2598     gclog_or_tty->print(" "  SIZE_FORMAT
2599                         "->" SIZE_FORMAT
2600                         "("  SIZE_FORMAT ")",
2601                         prev_metadata_used,
2602                         allocated_used_bytes(),
2603                         reserved_in_bytes());
2604   } else {
2605     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2606                         "->" SIZE_FORMAT "K"
2607                         "("  SIZE_FORMAT "K)",
2608                         prev_metadata_used / K,
2609                         allocated_used_bytes() / K,
2610                         reserved_in_bytes()/ K);
2611   }
2612 
2613   gclog_or_tty->print("]");
2614 }
2615 
2616 // This is printed when PrintGCDetails is enabled.
2617 void MetaspaceAux::print_on(outputStream* out) {
2618   Metaspace::MetadataType ct = Metaspace::ClassType;
2619   Metaspace::MetadataType nct = Metaspace::NonClassType;
2620 
2621   out->print_cr(" Metaspace total "
2622                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2623                 " reserved " SIZE_FORMAT "K",
2624                 allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
2625 
2626   out->print_cr("  data space     "
2627                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2628                 " reserved " SIZE_FORMAT "K",
2629                 allocated_capacity_bytes(nct)/K,
2630                 allocated_used_bytes(nct)/K,
2631                 reserved_in_bytes(nct)/K);


2632   out->print_cr("  class space    "
2633                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2634                 " reserved " SIZE_FORMAT "K",
2635                 allocated_capacity_bytes(ct)/K,
2636                 allocated_used_bytes(ct)/K,
2637                 reserved_in_bytes(ct)/K);

2638 }
2639 
2640 // Print information for class space and data space separately.
2641 // This is almost the same as above.
2642 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2643   size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
2644   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2645   size_t used_bytes = used_bytes_slow(mdtype);
2646   size_t free_bytes = free_in_bytes(mdtype);
2647   size_t used_and_free = used_bytes + free_bytes +
2648                            free_chunks_capacity_bytes;
2649   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2650              "K + unused in chunks " SIZE_FORMAT "K  + "
2651              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2652              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2653              used_bytes / K,
2654              free_bytes / K,
2655              free_chunks_capacity_bytes / K,
2656              used_and_free / K,
2657              capacity_bytes / K);
2658   // Accounting can only be correct if we got the values during a safepoint
2659   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2660 }
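
The assert encodes the chunk-accounting identity: capacity in allocated chunks must equal used-in-chunks plus unused-in-chunks plus capacity parked in free chunks. A worked example with made-up byte counts:

    // Made-up numbers illustrating the identity asserted above.
    const size_t used_bytes        = 300 * K;  // used in chunks
    const size_t free_bytes        =  40 * K;  // unused in chunks
    const size_t free_chunks_bytes =  60 * K;  // capacity in free chunks
    const size_t used_and_free     = used_bytes + free_bytes + free_chunks_bytes;
    // At a safepoint, capacity_bytes_slow(mdtype) must also report 400K.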
2661 
2662 // Print total fragmentation for class and data metaspaces separately
2663 void MetaspaceAux::print_waste(outputStream* out) {
2664 
2665   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2666   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2667   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2668   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2669 
2670   ClassLoaderDataGraphMetaspaceIterator iter;
2671   while (iter.repeat()) {
2672     Metaspace* msp = iter.get_next();
2673     if (msp != NULL) {
2674       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2675       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2676       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2677       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2678       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2679       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2680       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2681 
2682       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2683       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2684       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2685       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2686       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2687       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2688       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2689     }
2690   }
2691   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2692   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2693                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2694                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2695                         "large count " SIZE_FORMAT,
2696              specialized_count, specialized_waste, small_count,
2697              small_waste, medium_count, medium_waste, humongous_count);
2698   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2699                            SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2700                            SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2701                            "large count " SIZE_FORMAT,
2702              cls_specialized_count, cls_specialized_waste,
2703              cls_small_count, cls_small_waste,
2704              cls_medium_count, cls_medium_waste, cls_humongous_count);
2705 }
2706 
2707 // Dump global metaspace things from the end of ClassLoaderDataGraph
2708 void MetaspaceAux::dump(outputStream* out) {
2709   out->print_cr("All Metaspace:");
2710   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2711   out->print("class space: "); print_on(out, Metaspace::ClassType);
2712   print_waste(out);
2713 }
2714 
2715 void MetaspaceAux::verify_free_chunks() {
2716   Metaspace::space_list()->chunk_manager()->verify();

2717   Metaspace::class_space_list()->chunk_manager()->verify();

2718 }
2719 
2720 void MetaspaceAux::verify_capacity() {
2721 #ifdef ASSERT
2722   size_t running_sum_capacity_bytes = allocated_capacity_bytes();
2723   // For purposes of the running sum of capacity, verify against capacity
2724   size_t capacity_in_use_bytes = capacity_bytes_slow();
2725   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2726     err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
2727             " capacity_bytes_slow()" SIZE_FORMAT,
2728             running_sum_capacity_bytes, capacity_in_use_bytes));
2729   for (Metaspace::MetadataType i = Metaspace::ClassType;
2730        i < Metaspace::MetadataTypeCount;
2731        i = (Metaspace::MetadataType)(i + 1)) {
2732     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2733     assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
2734       err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
2735               " capacity_bytes_slow(%u)" SIZE_FORMAT,
2736               i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
2737   }


2759 #endif
2760 }
2761 
2762 void MetaspaceAux::verify_metrics() {
2763   verify_capacity();
2764   verify_used();
2765 }
2766 
2767 
2768 // Metaspace methods
2769 
2770 size_t Metaspace::_first_chunk_word_size = 0;
2771 size_t Metaspace::_first_class_chunk_word_size = 0;
2772 
2773 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2774   initialize(lock, type);
2775 }
2776 
2777 Metaspace::~Metaspace() {
2778   delete _vsm;

2779   delete _class_vsm;

2780 }
2781 
2782 VirtualSpaceList* Metaspace::_space_list = NULL;
2783 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2784 
2785 #define VIRTUALSPACEMULTIPLIER 2
2786 
2787 void Metaspace::global_initialize() {
2788   // Initialize the alignment for shared spaces.
2789   int max_alignment = os::vm_page_size();
2790   MetaspaceShared::set_max_alignment(max_alignment);
2791 
2792   if (DumpSharedSpaces) {
2793     SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
2794     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
2795     SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
2796     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
2797 
2798     // Initialize with the sum of the shared space sizes.  The read-only
2799     // and read write metaspace chunks will be allocated out of this and the
2800     // remainder is the misc code and data chunks.
2801     size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
2802                                  SharedMiscDataSize + SharedMiscCodeSize,
2803                                  os::vm_allocation_granularity());
2804     size_t word_size = total/wordSize;
2805     _space_list = new VirtualSpaceList(word_size);
2806   } else {
2807     // If using shared space, open the file that contains the shared space
2808     // and map in the memory before initializing the rest of metaspace (so
2809     // the addresses don't conflict)

2810     if (UseSharedSpaces) {
2811       FileMapInfo* mapinfo = new FileMapInfo();
2812       memset(mapinfo, 0, sizeof(FileMapInfo));
2813 
2814       // Open the shared archive file, read and validate the header. If
2815       // initialization fails, shared spaces [UseSharedSpaces] are
2816       // disabled and the file is closed.
2817       // Map in spaces now also
2818       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
2819         FileMapInfo::set_current_info(mapinfo);
2820       } else {
2821         assert(!mapinfo->is_open() && !UseSharedSpaces,
2822                "archive file not closed or shared spaces not disabled.");
2823       }
2824     }

2825 
2826     // Initialize these before initializing the VirtualSpaceList
2827     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
2828     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
2829     // Make the first class chunk bigger than a medium chunk so it's not put
2830     // on the medium chunk list.   The next chunk will be small and progress
2831   // from there.  This size was calculated by running -version.
2832     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
2833                                        (ClassMetaspaceSize/BytesPerWord)*2);
2834     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
2835     // Arbitrarily set the initial virtual space to a multiple
2836     // of the boot class loader size.
2837     size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
2838     // Initialize the list of virtual spaces.
2839     _space_list = new VirtualSpaceList(word_size);
2840   }
2841 }
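
A worked example for the first-class-chunk sizing above, assuming a made-up ClassMetaspaceSize of 100M on a 64-bit VM (BytesPerWord == 8):

    // MediumChunk * 6                     =  8K words * 6   =  48K words
    // (ClassMetaspaceSize/BytesPerWord)*2 = 12.5M words * 2 =  25M words
    // MIN2(...)                           =  48K words (then aligned up)
    //
    // so the first class chunk spans six medium chunks, well above
    // ClassMediumChunk (4K words), keeping it off the medium chunk list.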
2842 
2843 // For UseCompressedKlassPointers the class space is reserved as a piece of the
2844 // Java heap because the compression algorithm is the same for each.  The
2845 // argument passed in is at the top of the compressed space
2846 void Metaspace::initialize_class_space(ReservedSpace rs) {
2847   // The reserved space size may be bigger because of alignment, esp with UseLargePages
2848   assert(rs.size() >= ClassMetaspaceSize,
2849          err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
2850   _class_space_list = new VirtualSpaceList(rs);
2851 }
2852 
2853 void Metaspace::initialize(Mutex* lock,
2854                            MetaspaceType type) {
2855 
2856   assert(space_list() != NULL,
2857     "Metadata VirtualSpaceList has not been initialized");
2858 
2859   _vsm = new SpaceManager(Metaspace::NonClassType, lock, space_list());
2860   if (_vsm == NULL) {
2861     return;
2862   }
2863   size_t word_size;
2864   size_t class_word_size;
2865   vsm()->get_initial_chunk_sizes(type,
2866                                  &word_size,
2867                                  &class_word_size);
2868 

2869   assert(class_space_list() != NULL,
2870     "Class VirtualSpaceList has not been initialized");
2871 
2872   // Allocate SpaceManager for classes.
2873   _class_vsm = new SpaceManager(Metaspace::ClassType, lock, class_space_list());
2874   if (_class_vsm == NULL) {
2875     return;
2876   }

2877 
2878   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2879 
2880   // Allocate chunk for metadata objects
2881   Metachunk* new_chunk =
2882      space_list()->get_initialization_chunk(word_size,
2883                                             vsm()->medium_chunk_bunch());
2884   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
2885   if (new_chunk != NULL) {
2886     // Add to this manager's list of chunks in use and current_chunk().
2887     vsm()->add_chunk(new_chunk, true);
2888   }
2889 
2890   // Allocate chunk for class metadata objects

2891   Metachunk* class_chunk =
2892      class_space_list()->get_initialization_chunk(class_word_size,
2893                                                   class_vsm()->medium_chunk_bunch());
2894   if (class_chunk != NULL) {
2895     class_vsm()->add_chunk(class_chunk, true);
2896   }

2897 
2898   _alloc_record_head = NULL;
2899   _alloc_record_tail = NULL;
2900 }
2901 
2902 size_t Metaspace::align_word_size_up(size_t word_size) {
2903   size_t byte_size = word_size * wordSize;
2904   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
2905 }
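
A worked example for align_word_size_up(), assuming wordSize == 8 and a typical 4K allocation alignment (the actual granularity is platform-dependent):

    // word_size = 5  ->  byte_size = 40  ->  aligned up to 4096 bytes
    //                ->  4096 / 8 = 512 words
    // i.e. the word count is rounded up until the byte size meets the
    // ReservedSpace allocation alignment.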
2906 
2907 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
2908   // DumpSharedSpaces doesn't use class metadata area (yet)
2909   if (mdtype == ClassType && !DumpSharedSpaces) {

2910     return  class_vsm()->allocate(word_size);
2911   } else {
2912     return  vsm()->allocate(word_size);
2913   }
2914 }
2915 
2916 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
2917   MetaWord* result;
2918   MetaspaceGC::set_expand_after_GC(true);
2919   size_t before_inc = MetaspaceGC::capacity_until_GC();
2920   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
2921   MetaspaceGC::inc_capacity_until_GC(delta_bytes);
2922   if (PrintGCDetails && Verbose) {
2923     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
2924       " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
2925   }
2926 
2927   result = allocate(word_size, mdtype);
2928 
2929   return result;
2930 }
2931 
2932 // Space allocated in the Metaspace.  This may
2933 // be across several metadata virtual spaces.
2934 char* Metaspace::bottom() const {
2935   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
2936   return (char*)vsm()->current_chunk()->bottom();
2937 }
2938 
2939 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
2940   // return vsm()->allocated_used_words();
2941   return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
2942                                vsm()->sum_used_in_chunks_in_use();  // includes overhead!


2943 }
2944 
2945 size_t Metaspace::free_words(MetadataType mdtype) const {
2946   return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
2947                                vsm()->sum_free_in_chunks_in_use();



2948 }
2949 
2950 // Space capacity in the Metaspace.  It includes
2951 // space in the list of chunks from which allocations
2952 // have been made.  It does not include space in the global
2953 // freelist or space available in the dictionary, which
2954 // is already counted in some chunk.
2955 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
2956   return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
2957                                vsm()->sum_capacity_in_chunks_in_use();



2958 }
2959 
2960 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
2961   return used_words_slow(mdtype) * BytesPerWord;
2962 }
2963 
2964 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
2965   return capacity_words_slow(mdtype) * BytesPerWord;
2966 }
2967 
2968 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
2969   if (SafepointSynchronize::is_at_safepoint()) {
2970     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
2971     // Don't take Heap_lock
2972     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
2973     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2974       // Dark matter.  Too small for dictionary.
2975 #ifdef ASSERT
2976       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2977 #endif
2978       return;
2979     }
2980     if (is_class) {
2981        class_vsm()->deallocate(ptr, word_size);
2982     } else {
2983       vsm()->deallocate(ptr, word_size);
2984     }
2985   } else {
2986     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
2987 
2988     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2989       // Dark matter.  Too small for dictionary.
2990 #ifdef ASSERT
2991       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2992 #endif
2993       return;
2994     }
2995     if (is_class) {
2996       class_vsm()->deallocate(ptr, word_size);
2997     } else {
2998       vsm()->deallocate(ptr, word_size);
2999     }
3000   }
3001 }
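
Both branches above apply the same "dark matter" rule: blocks smaller than a dictionary tree chunk cannot be tracked by the block freelists, so in debug builds they are only mangled and then dropped. A minimal sketch of that decision (min_dictionary_size is a hypothetical stand-in for TreeChunk<Metablock, FreeList>::min_size()):

    // Sketch of the dark-matter threshold: too-small blocks are leaked by
    // design, mangled in debug builds so stale uses are easier to spot.
    void deallocate_sketch(MetaWord* ptr, size_t word_size,
                           size_t min_dictionary_size) {
      if (word_size < min_dictionary_size) {
    #ifdef ASSERT
        Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
    #endif
        return;  // dark matter: not returned for reuse
      }
      // otherwise the block would go back to the owning freelist
    }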
3002 
3003 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3004                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3005   if (HAS_PENDING_EXCEPTION) {
3006     assert(false, "Should not allocate with exception pending");
3007     return NULL;  // caller does a CHECK_NULL too
3008   }
3009 
3010   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3011 
3012   // SSS: Should we align the allocations and make sure the sizes are aligned.
3013   MetaWord* result = NULL;
3014 
3015   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "


3084 
3085   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3086     address ptr = rec->_ptr;
3087     if (last_addr < ptr) {
3088       closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3089     }
3090     closure->doit(ptr, rec->_type, rec->_byte_size);
3091     last_addr = ptr + rec->_byte_size;
3092   }
3093 
3094   address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3095   if (last_addr < top) {
3096     closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3097   }
3098 }
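
The walk above reports the gaps between recorded allocations as UnknownType, so the closure sees a contiguous cover from bottom() up to the used top. A condensed sketch (Rec and the callback shape are hypothetical stand-ins for AllocRecord and the closure):

    // Every byte between last_addr and top is reported exactly once,
    // either as a recorded block or as a gap.
    struct Rec { address _ptr; size_t _byte_size; Rec* _next; };

    void iterate_sketch(Rec* head, address last_addr, address top,
                        void (*doit)(address p, size_t size, bool is_gap)) {
      for (Rec* rec = head; rec != NULL; rec = rec->_next) {
        if (last_addr < rec->_ptr) {
          doit(last_addr, rec->_ptr - last_addr, true);   // gap
        }
        doit(rec->_ptr, rec->_byte_size, false);          // recorded block
        last_addr = rec->_ptr + rec->_byte_size;
      }
      if (last_addr < top) {
        doit(last_addr, top - last_addr, true);           // trailing gap
      }
    }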
3099 
3100 void Metaspace::purge() {
3101   MutexLockerEx cl(SpaceManager::expand_lock(),
3102                    Mutex::_no_safepoint_check_flag);
3103   space_list()->purge();

3104   class_space_list()->purge();

3105 }
3106 
3107 void Metaspace::print_on(outputStream* out) const {
3108   // Print both class virtual space counts and metaspace.
3109   if (Verbose) {
3110       vsm()->print_on(out);

3111       class_vsm()->print_on(out);
3112   }

3113 }
3114 
3115 bool Metaspace::contains(const void * ptr) {
3116   if (MetaspaceShared::is_in_shared_space(ptr)) {
3117     return true;
3118   }
3119   // This is checked while unlocked.  As long as the virtualspaces are added
3120   // at the end, the pointer will be in one of them.  The virtual spaces
3121   // aren't deleted presently.  When they are, some sort of locking might
3122   // be needed.  Note, locking this can cause inversion problems with the
3123   // caller in MetaspaceObj::is_metadata() function.
3124   return space_list()->contains(ptr) ||
3125          class_space_list()->contains(ptr);
3126 }
3127 
3128 void Metaspace::verify() {
3129   vsm()->verify();

3130   class_vsm()->verify();

3131 }
3132 
3133 void Metaspace::dump(outputStream* const out) const {
3134   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3135   vsm()->dump(out);

3136   out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3137   class_vsm()->dump(out);

3138 }


  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "gc_interface/collectedHeap.hpp"
  26 #include "memory/binaryTreeDictionary.hpp"
  27 #include "memory/freeList.hpp"
  28 #include "memory/collectorPolicy.hpp"
  29 #include "memory/filemap.hpp"
  30 #include "memory/freeList.hpp"
  31 #include "memory/metablock.hpp"
  32 #include "memory/metachunk.hpp"
  33 #include "memory/metaspace.hpp"
  34 #include "memory/metaspaceShared.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.hpp"
  37 #include "runtime/globals.hpp"
  38 #include "runtime/java.hpp"
  39 #include "runtime/mutex.hpp"
  40 #include "runtime/orderAccess.hpp"
  41 #include "services/memTracker.hpp"
  42 #include "utilities/copy.hpp"
  43 #include "utilities/debug.hpp"
  44 
  45 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
  46 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
  47 // Set this constant to true to enable slow integrity checking of
  48 // the free chunk lists.
  49 const bool metaspace_slow_verify = false;
  50 
  51 // Parameters for stress mode testing
  52 const uint metadata_deallocate_a_lot_block = 10;
  53 const uint metadata_deallocate_a_lock_chunk = 3;
  54 size_t const allocation_from_dictionary_limit = 64 * K;
  55 
  56 MetaWord* last_allocated = 0;
  57 
  58 size_t Metaspace::_class_metaspace_size;
  59 
  60 // Used in declarations in SpaceManager and ChunkManager
  61 enum ChunkIndex {
  62   ZeroIndex = 0,
  63   SpecializedIndex = ZeroIndex,
  64   SmallIndex = SpecializedIndex + 1,
  65   MediumIndex = SmallIndex + 1,
  66   HumongousIndex = MediumIndex + 1,
  67   NumberOfFreeLists = 3,
  68   NumberOfInUseLists = 4
  69 };
  70 
  71 enum ChunkSizes {    // in words.
  72   ClassSpecializedChunk = 128,
  73   SpecializedChunk = 128,
  74   ClassSmallChunk = 256,
  75   SmallChunk = 512,
  76   ClassMediumChunk = 4 * K,
  77   MediumChunk = 8 * K,
  78   HumongousChunkGranularity = 8
  79 };


 247   }
 248 }
 249 
 250   void print_on(outputStream* st) const;
 251 };
 252 
 253 class VirtualSpaceNode : public CHeapObj<mtClass> {
 254   friend class VirtualSpaceList;
 255 
 256   // Link to next VirtualSpaceNode
 257   VirtualSpaceNode* _next;
 258 
 259   // total in the VirtualSpace
 260   MemRegion _reserved;
 261   ReservedSpace _rs;
 262   VirtualSpace _virtual_space;
 263   MetaWord* _top;
 264   // count of chunks contained in this VirtualSpace
 265   uintx _container_count;
 266 
 267   // Convenience functions to access the _virtual_space
 268   char* low()  const { return virtual_space()->low(); }
 269   char* high() const { return virtual_space()->high(); }
 270 
 271   // The first Metachunk will be allocated at the bottom of the
 272   // VirtualSpace
 273   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 274 
 275   void inc_container_count();
 276 #ifdef ASSERT
 277   uint container_count_slow();
 278 #endif
 279 
 280  public:
 281 
 282   VirtualSpaceNode(size_t byte_size);
 283   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
 284   ~VirtualSpaceNode();
 285 
 286   // Convenience functions for logical bottom and end
 287   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
 288   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 289 
 290   // address of next available space in _virtual_space;
 291   // Accessors
 292   VirtualSpaceNode* next() { return _next; }
 293   void set_next(VirtualSpaceNode* v) { _next = v; }
 294 
 295   void set_reserved(MemRegion const v) { _reserved = v; }
 296   void set_top(MetaWord* v) { _top = v; }
 297 
 298   // Accessors
 299   MemRegion* reserved() { return &_reserved; }
 300   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 301 
 302   // Returns true if "word_size" is available in the VirtualSpace
 303   bool is_available(size_t word_size) { return _top + word_size <= end(); }
 304 
 305   MetaWord* top() const { return _top; }
 306   void inc_top(size_t word_size) { _top += word_size; }
 307 
 308   uintx container_count() { return _container_count; }
 309   void dec_container_count();


1299 }
1300 
1301 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
1302 
1303   // If the user wants a limit, impose one.
1304   // The reason for someone using this flag is to limit reserved space.  So
1305   // for non-class virtual space, compare against virtual spaces that are reserved.
1306   // For class virtual space, we only compare against the committed space, not
1307   // reserved space, because this is a larger space prereserved for compressed
1308   // class pointers.
1309   if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
1310     size_t real_allocated = Metaspace::space_list()->virtual_space_total() +
1311               MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
1312     if (real_allocated >= MaxMetaspaceSize) {
1313       return false;
1314     }
1315   }
1316 
1317   // Class virtual space should always be expanded.  Call GC for the other
1318   // metadata virtual space.
1319   if (Metaspace::using_class_space() &&
1320       (vsl == Metaspace::class_space_list())) return true;
1321 
1322   // If this is part of an allocation after a GC, expand
1323   // unconditionally.
1324   if (MetaspaceGC::expand_after_GC()) {
1325     return true;
1326   }
1327 
1328 
1329   // If the capacity is below the minimum capacity, allow the
1330   // expansion.  Also set the high-water-mark (capacity_until_GC)
1331   // to that minimum capacity so that a GC will not be induced
1332   // until that minimum capacity is exceeded.
1333   size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
1334   size_t metaspace_size_bytes = MetaspaceSize;
1335   if (committed_capacity_bytes < metaspace_size_bytes ||
1336       capacity_until_GC() == 0) {
1337     set_capacity_until_GC(metaspace_size_bytes);
1338     return true;
1339   } else {
1340     if (committed_capacity_bytes < capacity_until_GC()) {


2244              "Need branch for ClassSpecializedChunk");
2245       return SpecializedIndex;
2246     case SmallChunk:
2247     case ClassSmallChunk:
2248       return SmallIndex;
2249     case MediumChunk:
2250     case ClassMediumChunk:
2251       return MediumIndex;
2252     default:
2253       assert(size > MediumChunk || size > ClassMediumChunk,
2254              "Not a humongous chunk");
2255       return HumongousIndex;
2256   }
2257 }
2258 
2259 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2260   assert_lock_strong(_lock);
2261   size_t raw_word_size = get_raw_word_size(word_size);
2262   size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2263   assert(raw_word_size >= min_size,
2264          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2265   block_freelists()->return_block(p, raw_word_size);
2266 }
2267 
2268 // Adds a chunk to the list of chunks in use.
2269 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2270 
2271   assert(new_chunk != NULL, "Should not be NULL");
2272   assert(new_chunk->next() == NULL, "Should not be on a list");
2273 
2274   new_chunk->reset_empty();
2275 
2276   // Find the correct list and set the current
2277   // chunk for that list.
2278   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2279 
2280   if (index != HumongousIndex) {
2281     set_current_chunk(new_chunk);
2282     new_chunk->set_next(chunks_in_use(index));
2283     set_chunks_in_use(index, new_chunk);
2284   } else {


2361   }
2362 #endif
2363   // Is there space in the current chunk?
2364   MetaWord* result = NULL;
2365 
2366   // For DumpSharedSpaces, only allocate out of the current chunk which is
2367   // never null because we gave it the size we wanted.   Caller reports out
2368   // of memory if this returns null.
2369   if (DumpSharedSpaces) {
2370     assert(current_chunk() != NULL, "should never happen");
2371     inc_used_metrics(word_size);
2372     return current_chunk()->allocate(word_size); // caller handles null result
2373   }
2374   if (current_chunk() != NULL) {
2375     result = current_chunk()->allocate(word_size);
2376   }
2377 
2378   if (result == NULL) {
2379     result = grow_and_allocate(word_size);
2380   }
2381   if (result != 0) {
2382     inc_used_metrics(word_size);
2383     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2384            "Head of the list is being allocated");
2385   }
2386 
2387   return result;
2388 }
2389 
2390 void SpaceManager::verify() {
2391   // If there are blocks in the dictionary, then
2392   // verification of chunks does not work since
2393   // being in the dictionary alters a chunk.
2394   if (block_freelists()->total_size() == 0) {
2395     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2396       Metachunk* curr = chunks_in_use(i);
2397       while (curr != NULL) {
2398         curr->verify();
2399         verify_chunk_size(curr);
2400         curr = curr->next();
2401       }


2465   for (ChunkIndex index = ZeroIndex;
2466        index < NumberOfInUseLists;
2467        index = next_chunk_index(index)) {
2468     for (Metachunk* curr = chunks_in_use(index);
2469          curr != NULL;
2470          curr = curr->next()) {
2471       curr->mangle();
2472     }
2473   }
2474 }
2475 #endif // PRODUCT
2476 
2477 // MetaspaceAux
2478 
2479 
2480 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
2481 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
2482 
2483 size_t MetaspaceAux::free_bytes() {
2484   size_t result = 0;
2485   if (Metaspace::using_class_space() &&
2486       (Metaspace::class_space_list() != NULL)) {
2487     result = result + Metaspace::class_space_list()->free_bytes();
2488   }
2489   if (Metaspace::space_list() != NULL) {
2490     result = result + Metaspace::space_list()->free_bytes();
2491   }
2492   return result;
2493 }
2494 
2495 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2496   assert_lock_strong(SpaceManager::expand_lock());
2497   assert(words <= allocated_capacity_words(mdtype),
2498     err_msg("About to decrement below 0: words " SIZE_FORMAT
2499             " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
2500             words, mdtype, allocated_capacity_words(mdtype)));
2501   _allocated_capacity_words[mdtype] -= words;
2502 }
2503 
2504 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2505   assert_lock_strong(SpaceManager::expand_lock());
2506   // Needs to be atomic


2537     if (msp != NULL) {
2538       used += msp->used_words_slow(mdtype);
2539     }
2540   }
2541   return used * BytesPerWord;
2542 }
2543 
2544 size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
2545   size_t free = 0;
2546   ClassLoaderDataGraphMetaspaceIterator iter;
2547   while (iter.repeat()) {
2548     Metaspace* msp = iter.get_next();
2549     if (msp != NULL) {
2550       free += msp->free_words(mdtype);
2551     }
2552   }
2553   return free * BytesPerWord;
2554 }
2555 
2556 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2557   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2558     return 0;
2559   }
2560   // Don't count the space in the freelists.  That space will be
2561   // added to the capacity calculation as needed.
2562   size_t capacity = 0;
2563   ClassLoaderDataGraphMetaspaceIterator iter;
2564   while (iter.repeat()) {
2565     Metaspace* msp = iter.get_next();
2566     if (msp != NULL) {
2567       capacity += msp->capacity_words_slow(mdtype);
2568     }
2569   }
2570   return capacity * BytesPerWord;
2571 }
2572 
2573 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
2574   if (mdtype == Metaspace::ClassType) {
2575     return Metaspace::using_class_space() ?
2576            Metaspace::class_space_list()->virtual_space_total() * BytesPerWord : 0;
2577   } else {
2578     return Metaspace::space_list()->virtual_space_total() * BytesPerWord;
2579   }
2580 }
2581 
2582 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
2583 
2584 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
2585   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2586     return 0;
2587   }
2588   ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
2589                          Metaspace::class_space_list()->chunk_manager() :
2590                          Metaspace::space_list()->chunk_manager();
2591   chunk->slow_verify();
2592   return chunk->free_chunks_total();
2593 }
2594 
2595 size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
2596   return free_chunks_total(mdtype) * BytesPerWord;
2597 }
2598 
2599 size_t MetaspaceAux::free_chunks_total() {
2600   return free_chunks_total(Metaspace::ClassType) +
2601          free_chunks_total(Metaspace::NonClassType);
2602 }
2603 
2604 size_t MetaspaceAux::free_chunks_total_in_bytes() {
2605   return free_chunks_total() * BytesPerWord;
2606 }
2607 


2611     gclog_or_tty->print(" "  SIZE_FORMAT
2612                         "->" SIZE_FORMAT
2613                         "("  SIZE_FORMAT ")",
2614                         prev_metadata_used,
2615                         allocated_used_bytes(),
2616                         reserved_in_bytes());
2617   } else {
2618     gclog_or_tty->print(" "  SIZE_FORMAT "K"
2619                         "->" SIZE_FORMAT "K"
2620                         "("  SIZE_FORMAT "K)",
2621                         prev_metadata_used / K,
2622                         allocated_used_bytes() / K,
2623                         reserved_in_bytes()/ K);
2624   }
2625 
2626   gclog_or_tty->print("]");
2627 }
2628 
2629 // This is printed when PrintGCDetails is enabled.
2630 void MetaspaceAux::print_on(outputStream* out) {

2631   Metaspace::MetadataType nct = Metaspace::NonClassType;
2632 
2633   out->print_cr(" Metaspace total "
2634                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2635                 " reserved " SIZE_FORMAT "K",
2636                 allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
2637 
2638   out->print_cr("  data space     "
2639                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2640                 " reserved " SIZE_FORMAT "K",
2641                 allocated_capacity_bytes(nct)/K,
2642                 allocated_used_bytes(nct)/K,
2643                 reserved_in_bytes(nct)/K);
2644   if (Metaspace::using_class_space()) {
2645     Metaspace::MetadataType ct = Metaspace::ClassType;
2646     out->print_cr("  class space    "
2647                   SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2648                   " reserved " SIZE_FORMAT "K",
2649                   allocated_capacity_bytes(ct)/K,
2650                   allocated_used_bytes(ct)/K,
2651                   reserved_in_bytes(ct)/K);
2652   }
2653 }
2654 
2655 // Print information for class space and data space separately.
2656 // This is almost the same as above.
2657 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2658   size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
2659   size_t capacity_bytes = capacity_bytes_slow(mdtype);
2660   size_t used_bytes = used_bytes_slow(mdtype);
2661   size_t free_bytes = free_in_bytes(mdtype);
2662   size_t used_and_free = used_bytes + free_bytes +
2663                            free_chunks_capacity_bytes;
2664   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
2665              "K + unused in chunks " SIZE_FORMAT "K  + "
2666              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2667              "K  capacity in allocated chunks " SIZE_FORMAT "K",
2668              used_bytes / K,
2669              free_bytes / K,
2670              free_chunks_capacity_bytes / K,
2671              used_and_free / K,
2672              capacity_bytes / K);
2673   // Accounting can only be correct if we got the values during a safepoint
2674   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2675 }
2676 
2677 // Print total fragmentation for class metaspaces
2678 void MetaspaceAux::print_class_waste(outputStream* out) {
2679   assert(Metaspace::using_class_space(), "class metaspace not used");
2680   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2681   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2682   ClassLoaderDataGraphMetaspaceIterator iter;
2683   while (iter.repeat()) {
2684     Metaspace* msp = iter.get_next();
2685     if (msp != NULL) {
2686       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2687       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2688       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2689       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2690       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2691       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2692       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2693     }
2694   }
2695   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2696                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2697                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2698                 "large count " SIZE_FORMAT,
2699                 cls_specialized_count, cls_specialized_waste,
2700                 cls_small_count, cls_small_waste,
2701                 cls_medium_count, cls_medium_waste, cls_humongous_count);
2702 }
2703 
2704 // Print total fragmentation for data and class metaspaces separately
2705 void MetaspaceAux::print_waste(outputStream* out) {
2706   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2707   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;


2708 
2709   ClassLoaderDataGraphMetaspaceIterator iter;
2710   while (iter.repeat()) {
2711     Metaspace* msp = iter.get_next();
2712     if (msp != NULL) {
2713       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2714       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2715       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2716       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2717       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2718       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2719       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2720     }
2721   }
2722   out->print_cr("Total fragmentation waste (words) doesn't count free space");
2723   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2724                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2725                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2726                         "large count " SIZE_FORMAT,
2727              specialized_count, specialized_waste, small_count,
2728              small_waste, medium_count, medium_waste, humongous_count);
2729   if (Metaspace::using_class_space()) {
2730     print_class_waste(out);
2731   }
2732 }
2733 
2734 // Dump global metaspace things from the end of ClassLoaderDataGraph
2735 void MetaspaceAux::dump(outputStream* out) {
2736   out->print_cr("All Metaspace:");
2737   out->print("data space: "); print_on(out, Metaspace::NonClassType);
2738   out->print("class space: "); print_on(out, Metaspace::ClassType);
2739   print_waste(out);
2740 }
2741 
2742 void MetaspaceAux::verify_free_chunks() {
2743   Metaspace::space_list()->chunk_manager()->verify();
2744   if (Metaspace::using_class_space()) {
2745     Metaspace::class_space_list()->chunk_manager()->verify();
2746   }
2747 }
2748 
2749 void MetaspaceAux::verify_capacity() {
2750 #ifdef ASSERT
2751   size_t running_sum_capacity_bytes = allocated_capacity_bytes();
2752   // For purposes of the running sum of capacity, verify against capacity
2753   size_t capacity_in_use_bytes = capacity_bytes_slow();
2754   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2755     err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
2756             " capacity_bytes_slow()" SIZE_FORMAT,
2757             running_sum_capacity_bytes, capacity_in_use_bytes));
2758   for (Metaspace::MetadataType i = Metaspace::ClassType;
2759        i < Metaspace::MetadataTypeCount;
2760        i = (Metaspace::MetadataType)(i + 1)) {
2761     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2762     assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
2763       err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
2764               " capacity_bytes_slow(%u)" SIZE_FORMAT,
2765               i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
2766   }


2788 #endif
2789 }
2790 
2791 void MetaspaceAux::verify_metrics() {
2792   verify_capacity();
2793   verify_used();
2794 }
2795 
2796 
2797 // Metaspace methods
2798 
2799 size_t Metaspace::_first_chunk_word_size = 0;
2800 size_t Metaspace::_first_class_chunk_word_size = 0;
2801 
2802 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2803   initialize(lock, type);
2804 }
2805 
2806 Metaspace::~Metaspace() {
2807   delete _vsm;
2808   if (using_class_space()) {
2809     delete _class_vsm;
2810   }
2811 }
2812 
2813 VirtualSpaceList* Metaspace::_space_list = NULL;
2814 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2815 
2816 #define VIRTUALSPACEMULTIPLIER 2
2817 
2818 #ifdef _LP64
2819 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2820   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
2821   // narrow_klass_base is the lower of the metaspace base and the cds base
2822   // (if cds is enabled).  The narrow_klass_shift depends on the distance
2823   // between the lower base and higher address.
2824   address lower_base;
2825   address higher_address;
2826   if (UseSharedSpaces) {
2827     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2828                           (address)(metaspace_base + class_metaspace_size()));
2829     lower_base = MIN2(metaspace_base, cds_base);
2830   } else {
2831     higher_address = metaspace_base + class_metaspace_size();
2832     lower_base = metaspace_base;
2833   }
2834   Universe::set_narrow_klass_base(lower_base);
2835   if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
2836     Universe::set_narrow_klass_shift(0);
2837   } else {
2838     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2839     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2840   }
2841 }
2842 
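// Illustrative sketch: the base and shift chosen above are what a compressed
// klass pointer decode consumes; decode() here stands in for the VM's actual
// decoding routine and is shown only to motivate the max_juint check:
//
//   Klass* decode(narrowKlass v) {
//     return (Klass*)(Universe::narrow_klass_base() +
//                     ((uintptr_t)v << Universe::narrow_klass_shift()));
//   }
//
// With shift == 0, a 32-bit narrow value can only cover max_juint bytes above
// lower_base, which is why the span check above compares against max_juint.
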
2843 // Return TRUE if the specified metaspace_base and cds_base are close enough
2844 // to work with compressed klass pointers.
2845 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2846   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2847   assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
2848   address lower_base = MIN2((address)metaspace_base, cds_base);
2849   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2850                                 (address)(metaspace_base + class_metaspace_size()));
2851   return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
2852 }
2853 
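// Worked example (hypothetical addresses): with metaspace_base = 0x800000000,
// cds_base = 0x7f0000000 and shared_spaces_size() = 256*M, lower_base is
// 0x7f0000000 and higher_address is MAX2(0x7f0000000 + 256*M,
// 0x800000000 + class_metaspace_size()).  The pair is usable only while
// higher_address - lower_base stays below max_juint (~4G), the largest span
// reachable by an unshifted 32-bit klass offset.
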
2854 // Try to allocate the metaspace at the requested addr.
2855 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2856   assert(using_class_space(), "called improperly");
2857   assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
2858   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
2859          "Metaspace size is too big");
2860 
2861   ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
2862                                              os::vm_allocation_granularity(),
2863                                              false, requested_addr, 0);
2864   if (!metaspace_rs.is_reserved()) {
2865     if (UseSharedSpaces) {
2866       // Keep trying to allocate the metaspace, increasing the requested_addr
2867       // by 1GB each time, until we reach an address that will no longer allow
2868       // use of CDS with compressed klass pointers.
2869       char *addr = requested_addr;
2870       while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
2871              can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
2872         addr = addr + 1*G;
2873         metaspace_rs = ReservedSpace(class_metaspace_size(),
2874                                      os::vm_allocation_granularity(), false, addr, 0);
2875       }
2876     }
2877 
2878     // If no successful allocation then try to allocate the space anywhere.  If
2879     // that fails, exit with an out-of-memory error.  At this point we cannot
2880     // retry the allocation as if UseCompressedKlassPointers were off, because
2881     // too much initialization that depends on UseCompressedKlassPointers has
2882     // already happened; the flag cannot be turned off now.
2883     if (!metaspace_rs.is_reserved()) {
2884       metaspace_rs = ReservedSpace(class_metaspace_size(),
2885                                    os::vm_allocation_granularity(), false);
2886       if (!metaspace_rs.is_reserved()) {
2887         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
2888                                               class_metaspace_size()));
2889       }
2890     }
2891   }
2892  
2893   // If we got here then the metaspace got allocated.
2894   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
2895 
2896   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
2897   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
2898     FileMapInfo::stop_sharing_and_unmap(
2899         "Could not allocate metaspace at a compatible address");
2900   }
2901 
2902   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
2903                                   UseSharedSpaces ? (address)cds_base : 0);
2904 
2905   initialize_class_space(metaspace_rs);
2906 
2907   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
2908     gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
2909                             Universe::narrow_klass_base(), Universe::narrow_klass_shift());
2910     gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
2911                            class_metaspace_size(), metaspace_rs.base(), requested_addr);
2912   }
2913 }
2914 
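// Worked example (hypothetical addresses): if requested_addr = 0x800000000
// cannot be reserved, the loop above retries at 0x840000000, 0x880000000, ...
// in steps of 1*G (0x40000000), stopping once addr + 1*G would wrap or would
// no longer satisfy can_use_cds_with_metaspace_addr().  Only then is an
// unconstrained ReservedSpace attempted, and failing that the VM exits.
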
2915 // For UseCompressedKlassPointers the class space is reserved above the top of
2916 // the Java heap.  The argument passed in is at the base of the compressed space.
2917 void Metaspace::initialize_class_space(ReservedSpace rs) {
2918   // The reserved space size may be bigger because of alignment, esp with UseLargePages
2919   assert(rs.size() >= ClassMetaspaceSize,
2920          err_msg("reserved space " SIZE_FORMAT " is smaller than ClassMetaspaceSize " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
2921   assert(using_class_space(), "Must be using class space");
2922   _class_space_list = new VirtualSpaceList(rs);
2923 }
2924 
2925 #endif
2926 
2927 void Metaspace::global_initialize() {
2928   // Initialize the alignment for shared spaces.
2929   int max_alignment = os::vm_page_size();
2930   size_t cds_total = 0;
2931 
2932   set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
2933                                          os::vm_allocation_granularity()));
2934 
2935   MetaspaceShared::set_max_alignment(max_alignment);
2936 
2937   if (DumpSharedSpaces) {
2938     SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
2939     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
2940     SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
2941     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
2942 
2943     // Initialize with the sum of the shared space sizes.  The read-only
2944     // and read-write metaspace chunks will be allocated out of this, and the
2945     // remainder is for the misc code and data chunks.
2946     cds_total = FileMapInfo::shared_spaces_size();
2947     _space_list = new VirtualSpaceList(cds_total/wordSize);
2948  
2949 #ifdef _LP64
2950     // Set the compressed klass pointer base so that decoding of these pointers works
2951     // properly when creating the shared archive.
2952     assert(UseCompressedOops && UseCompressedKlassPointers,
2953       "UseCompressedOops and UseCompressedKlassPointers must be set");
2954     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
2955     if (TraceMetavirtualspaceAllocation && Verbose) {
2956       gclog_or_tty->print_cr("Setting narrow_klass_base to address: " PTR_FORMAT,
2957                              _space_list->current_virtual_space()->bottom());
2958     }
2959 
2960     // Set the shift to zero.
2961     assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
2962            "CDS region is too large");
2963     Universe::set_narrow_klass_shift(0);
2964 #endif
2965 
2966   } else {
2967     // If using shared space, open the file that contains the shared space
2968     // and map in the memory before initializing the rest of metaspace (so
2969     // the addresses don't conflict)
2970     address cds_address = NULL;
2971     if (UseSharedSpaces) {
2972       FileMapInfo* mapinfo = new FileMapInfo();
2973       memset(mapinfo, 0, sizeof(FileMapInfo));
2974 
2975       // Open the shared archive file, read and validate the header. If
2976       // initialization fails, shared spaces [UseSharedSpaces] are
2977       // disabled and the file is closed.
2978       // Map in spaces now also
2979       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
2980         FileMapInfo::set_current_info(mapinfo);
2981       } else {
2982         assert(!mapinfo->is_open() && !UseSharedSpaces,
2983                "archive file not closed or shared spaces not disabled.");
2984       }
2985       cds_total = FileMapInfo::shared_spaces_size();
2986       cds_address = (address)mapinfo->region_base(0);
2987     }
2988 
2989 #ifdef _LP64
2990     // If UseCompressedKlassPointers is set then allocate the metaspace area
2991     // above the heap and above the CDS area (if it exists).
2992     if (using_class_space()) {
2993       if (UseSharedSpaces) {
2994         allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
2995       } else {
2996         allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
2997       }
2998     }
2999 #endif
3000 
3001     // Initialize these before initializing the VirtualSpaceList
3002     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3003     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3004     // Make the first class chunk bigger than a medium chunk so it's not put
3005     // on the medium chunk list.  The next chunk will be small and progress
3006     // from there.  This size was determined by running with -version.
3007     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3008                                        (ClassMetaspaceSize/BytesPerWord)*2);
3009     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3010     // Arbitrarily set the initial virtual space to a multiple
3011     // of the boot class loader size.
3012     size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
3013     // Initialize the list of virtual spaces.
3014     _space_list = new VirtualSpaceList(word_size);
3015   }
3016 }
3017 
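// Worked example (hypothetical flag values, 64-bit): with MediumChunk = 8*K
// words and ClassMetaspaceSize = 100*M, the code above computes
// _first_class_chunk_word_size = MIN2(8*K*6, (100*M/8)*2) = 48*K words, which
// exceeds ClassMediumChunk (4*K words), so the first class chunk stays off
// the medium chunk list as intended.  The initial non-class virtual space is
// VIRTUALSPACEMULTIPLIER (2) times the boot class loader's first chunk size.
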
3018 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {











3019 
3020   assert(space_list() != NULL,
3021     "Metadata VirtualSpaceList has not been initialized");
3022 
3023   _vsm = new SpaceManager(NonClassType, lock, space_list());
3024   if (_vsm == NULL) {
3025     return;
3026   }
3027   size_t word_size;
3028   size_t class_word_size;
3029   vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);


3030 
3031   if (using_class_space()) {
3032     assert(class_space_list() != NULL,
3033       "Class VirtualSpaceList has not been initialized");
3034 
3035     // Allocate SpaceManager for classes.
3036     _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
3037     if (_class_vsm == NULL) {
3038       return;
3039     }
3040   }
3041 
3042   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3043 
3044   // Allocate chunk for metadata objects
3045   Metachunk* new_chunk =
3046      space_list()->get_initialization_chunk(word_size,
3047                                             vsm()->medium_chunk_bunch());
3048   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3049   if (new_chunk != NULL) {
3050     // Add to this manager's list of chunks in use and current_chunk().
3051     vsm()->add_chunk(new_chunk, true);
3052   }
3053 
3054   // Allocate chunk for class metadata objects
3055   if (using_class_space()) {
3056     Metachunk* class_chunk =
3057        class_space_list()->get_initialization_chunk(class_word_size,
3058                                                     class_vsm()->medium_chunk_bunch());
3059     if (class_chunk != NULL) {
3060       class_vsm()->add_chunk(class_chunk, true);
3061     }
3062   }
3063 
3064   _alloc_record_head = NULL;
3065   _alloc_record_tail = NULL;
3066 }
3067 
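// Illustrative usage sketch (loader_lock is a hypothetical name): a class
// loader's metadata area is created as
//
//   Metaspace* ms = new Metaspace(loader_lock, Metaspace::StandardMetaspaceType);
//
// which runs the initialization above: build the SpaceManager(s), ask
// get_initial_chunk_sizes() for sizes appropriate to the MetaspaceType, and
// seed each manager with an initial chunk.  loader_lock stands in for the
// owning ClassLoaderData's metaspace lock.
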
3068 size_t Metaspace::align_word_size_up(size_t word_size) {
3069   size_t byte_size = word_size * wordSize;
3070   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3071 }
3072 
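// Worked example (assuming 8-byte words and a 4*K allocation alignment; the
// actual alignment is platform-dependent): align_word_size_up(100) converts
// 100 words to 800 bytes, rounds up to 4096 bytes, and returns 512 words.
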
3073 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3074   // DumpSharedSpaces doesn't use class metadata area (yet)
3075   // Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
3076   if (mdtype == ClassType && using_class_space()) {
3077     return class_vsm()->allocate(word_size);
3078   } else {
3079     return vsm()->allocate(word_size);
3080   }
3081 }
3082 
3083 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3084   MetaWord* result;
3085   MetaspaceGC::set_expand_after_GC(true);
3086   size_t before_inc = MetaspaceGC::capacity_until_GC();
3087   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
3088   MetaspaceGC::inc_capacity_until_GC(delta_bytes);
3089   if (PrintGCDetails && Verbose) {
3090     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3091       " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
3092   }
3093 
3094   result = allocate(word_size, mdtype);
3095 
3096   return result;
3097 }
3098 
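// Worked example (hypothetical numbers): if capacity_until_GC() is 20*M and
// delta_capacity_until_GC(word_size) returns 512*K words, the GC threshold is
// raised by 512*K * BytesPerWord = 4*M to 24*M before allocate() is retried,
// so the retry can expand the metaspace without immediately tripping the
// high-water mark again.
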
3099 // Space allocated in the Metaspace.  This may
3100 // be across several metadata virtual spaces.
3101 char* Metaspace::bottom() const {
3102   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3103   return (char*)vsm()->current_chunk()->bottom();
3104 }
3105 
3106 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3107   if (mdtype == ClassType) {
3108     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3109   } else {
3110     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
3111   }
3112 }
3113 
3114 size_t Metaspace::free_words(MetadataType mdtype) const {
3115   if (mdtype == ClassType) {
3116     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3117   } else {
3118     return vsm()->sum_free_in_chunks_in_use();
3119   }
3120 }
3121 
3122 // Space capacity in the Metaspace.  It includes
3123 // space in the list of chunks from which allocations
3124 // have been made.  It does not include space in the global
3125 // freelist or space available in the dictionary, since that
3126 // space is already counted in some chunk.
3127 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3128   if (mdtype == ClassType) {
3129     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3130   } else {
3131     return vsm()->sum_capacity_in_chunks_in_use();
3132   }
3133 }
3134 
3135 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3136   return used_words_slow(mdtype) * BytesPerWord;
3137 }
3138 
3139 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3140   return capacity_words_slow(mdtype) * BytesPerWord;
3141 }
3142 
3143 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3144   if (SafepointSynchronize::is_at_safepoint()) {
3145     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
3146     // Don't take Heap_lock
3147     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3148     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
3149       // Dark matter.  Too small for dictionary.
3150 #ifdef ASSERT
3151       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3152 #endif
3153       return;
3154     }
3155     if (is_class && using_class_space()) {
3156       class_vsm()->deallocate(ptr, word_size);
3157     } else {
3158       vsm()->deallocate(ptr, word_size);
3159     }
3160   } else {
3161     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3162 
3163     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
3164       // Dark matter.  Too small for dictionary.
3165 #ifdef ASSERT
3166       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3167 #endif
3168       return;
3169     }
3170     if (is_class && using_class_space()) {
3171       class_vsm()->deallocate(ptr, word_size);
3172     } else {
3173       vsm()->deallocate(ptr, word_size);
3174     }
3175   }
3176 }
3177 
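// Illustrative note: blocks smaller than
// TreeChunk<Metablock, FreeList>::min_size() are deliberately abandoned as
// "dark matter" -- they cannot hold the dictionary's tree-node header.  For
// example, if min_size() were 4 words, a freed 3-word block would be left in
// place (poisoned with 0xf5f5f5f5 in debug builds) rather than recycled.
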
3178 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3179                               bool read_only, MetaspaceObj::Type type, TRAPS) {
3180   if (HAS_PENDING_EXCEPTION) {
3181     assert(false, "Should not allocate with exception pending");
3182     return NULL;  // caller does a CHECK_NULL too
3183   }
3184 
3185   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3186 
3187   // SSS: Should we align the allocations and make sure the sizes are aligned?
3188   MetaWord* result = NULL;
3189 
3190   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "


3259 
3260   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3261     address ptr = rec->_ptr;
3262     if (last_addr < ptr) {
3263       closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3264     }
3265     closure->doit(ptr, rec->_type, rec->_byte_size);
3266     last_addr = ptr + rec->_byte_size;
3267   }
3268 
3269   address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3270   if (last_addr < top) {
3271     closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3272   }
3273 }
3274 
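// Illustrative walk (hypothetical records): for records
// { (0x1000, ClassType, 0x40), (0x1080, SymbolType, 0x20) } with
// bottom() == 0x1000, the loop above reports the 0x40-byte class record, a
// 0x40-byte UnknownType gap (0x1080 - 0x1040), the 0x20-byte symbol record,
// and finally an UnknownType tail up to
// bottom() + used_bytes_slow(Metaspace::NonClassType).
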
3275 void Metaspace::purge() {
3276   MutexLockerEx cl(SpaceManager::expand_lock(),
3277                    Mutex::_no_safepoint_check_flag);
3278   space_list()->purge();
3279   if (using_class_space()) {
3280     class_space_list()->purge();
3281   }
3282 }
3283 
3284 void Metaspace::print_on(outputStream* out) const {
3285   // Print both class virtual space counts and metaspace.
3286   if (Verbose) {
3287     vsm()->print_on(out);
3288     if (using_class_space()) {
3289       class_vsm()->print_on(out);
3290     }
3291   }
3292 }
3293 
3294 bool Metaspace::contains(const void * ptr) {
3295   if (MetaspaceShared::is_in_shared_space(ptr)) {
3296     return true;
3297   }
3298   // This is checked while unlocked.  As long as the virtualspaces are added
3299   // at the end, the pointer will be in one of them.  The virtual spaces
3300   // aren't deleted presently.  When they are, some sort of locking might
3301   // be needed.  Note that locking here can cause inversion problems with the
3302   // caller, MetaspaceObj::is_metadata().
3303   return space_list()->contains(ptr) ||
3304          (using_class_space() && class_space_list()->contains(ptr));
3305 }
3306 
3307 void Metaspace::verify() {
3308   vsm()->verify();
3309   if (using_class_space()) {
3310     class_vsm()->verify();
3311   }
3312 }
3313 
3314 void Metaspace::dump(outputStream* const out) const {
3315   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3316   vsm()->dump(out);
3317   if (using_class_space()) {
3318     out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3319     class_vsm()->dump(out);
3320   }
3321 }