src/share/vm/memory/metaspace.cpp

rev 6853 : 8046070: Class Data Sharing clean up and refactoring
Summary: Cleaned up CDS to be more configurable, maintainable and extensible
Reviewed-by: dholmes, coleenp, acorn, mchung

@@ -411,10 +411,11 @@
 
  // bytes is the size of the associated virtual space.
 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 
+#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
   if (DumpSharedSpaces) {
     bool large_pages = false; // No large pages when dumping the CDS archive.

@@ -426,11 +427,13 @@
     } else {
      // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
     }
     MetaspaceShared::set_shared_rs(&_rs);
-  } else {
+  } else
+#endif
+  {
     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 
     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
   }
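
For reference, the reserve-at-a-preferred-address-then-fall-back pattern used here can be sketched in plain POSIX terms roughly as follows (illustrative only; the real ReservedSpace also handles alignment, large pages and NMT tagging):

#include <stddef.h>
#include <sys/mman.h>

// Minimal sketch: pass the preferred address as a hint (no MAP_FIXED); if
// the kernel places the mapping elsewhere, release it and map anywhere.
static void* reserve_memory(size_t bytes, void* preferred) {
  void* p = mmap(preferred, bytes, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p != MAP_FAILED && preferred != NULL && p != preferred) {
    munmap(p, bytes);                  // mapped, but not where we asked
    p = MAP_FAILED;
  }
  if (p == MAP_FAILED) {
    p = mmap(NULL, bytes, PROT_NONE,   // fall back: anywhere
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  }
  return (p == MAP_FAILED) ? NULL : p;
}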
 

@@ -2937,15 +2940,18 @@
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  // between the lower base and the higher address.
   address lower_base;
   address higher_address;
+#if INCLUDE_CDS
   if (UseSharedSpaces) {
     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                           (address)(metaspace_base + compressed_class_space_size()));
     lower_base = MIN2(metaspace_base, cds_base);
-  } else {
+  } else
+#endif
+  {
     higher_address = metaspace_base + compressed_class_space_size();
     lower_base = metaspace_base;
 
     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
     // If compressed class space fits in lower 32G, we don't need a base.

@@ -2962,20 +2968,22 @@
     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
   }
 }
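
The base/shift choice being made above can be summarized by a small sketch (a simplification, not the exact HotSpot logic; assumed constants UnscaledClassSpaceMax = 1ULL << 32 and LogKlassAlignmentInBytes = 3 match their HotSpot values, and the CDS constraint from the assert above is omitted):

#include <stdint.h>

struct NarrowKlassEncoding { uint64_t base; int shift; };

// Sketch of the decision: zero-based if everything fits under 32G,
// unscaled (shift 0) if the span from the base fits in 32 bits.
static NarrowKlassEncoding choose(uint64_t lower_base, uint64_t higher_address) {
  const uint64_t unscaled_max = 1ULL << 32;        // 4G: reachable with shift 0
  const uint64_t encoding_max = unscaled_max << 3; // 32G: reachable with shift 3
  NarrowKlassEncoding e;
  e.base  = (higher_address <= encoding_max) ? 0 : lower_base;
  e.shift = ((higher_address - e.base) <= unscaled_max) ? 0 : 3;
  return e;
}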
 
+#if INCLUDE_CDS
 // Return TRUE if the specified metaspace_base and cds_base are close enough
 // to work with compressed klass pointers.
 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   address lower_base = MIN2((address)metaspace_base, cds_base);
   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                                 (address)(metaspace_base + compressed_class_space_size()));
   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
 }
+#endif
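
For a concrete (illustrative) example: with cds_base at 4G, a 256M shared archive, metaspace_base at 8G and a 1G compressed class space, lower_base is 4G and higher_address is 9G, so the 5G span exceeds UnscaledClassSpaceMax (4G) and the function returns false. The caller then stops sharing and unmaps the archive, since archived narrow klass pointers were written assuming shift 0 (see the assert above).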
 
 // Try to allocate the metaspace at the requested addr.
 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
   assert(using_class_space(), "called improperly");
   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");

@@ -2991,10 +2999,11 @@
   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                              _reserve_alignment,
                                              large_pages,
                                              requested_addr, 0);
   if (!metaspace_rs.is_reserved()) {
+#if INCLUDE_CDS
     if (UseSharedSpaces) {
       size_t increment = align_size_up(1*G, _reserve_alignment);
 
       // Keep trying to allocate the metaspace, increasing the requested_addr
       // by 1GB each time, until we reach an address that will no longer allow

@@ -3005,11 +3014,11 @@
         addr = addr + increment;
         metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                      _reserve_alignment, large_pages, addr, 0);
       }
     }
-
+#endif
     // If no successful allocation then try to allocate the space anywhere.  If
     // that fails then OOM doom.  At this point we cannot try allocating the
     // metaspace as if UseCompressedClassPointers is off because too much
     // initialization has happened that depends on UseCompressedClassPointers.
     // So, UseCompressedClassPointers cannot be turned off at this point.
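
The retry loop above amounts to bounded upward probing; a standalone sketch of the pattern (the try_reserve callback is hypothetical, not a HotSpot API):

#include <stddef.h>
#include <stdint.h>

typedef void* (*try_reserve_fn)(void* addr, size_t bytes);

// Probe upward in 1G steps from a starting address; stop once the next
// candidate could no longer satisfy the caller's upper bound.
static void* probe_upward(uint64_t start, size_t bytes, uint64_t limit,
                          try_reserve_fn try_reserve) {
  const uint64_t step = 1ULL << 30;  // 1G, analogous to the increment above
  for (uint64_t a = start; a + bytes <= limit; a += step) {
    void* p = try_reserve((void*)(uintptr_t)a, bytes);
    if (p != NULL) {
      return p;
    }
  }
  return NULL;  // caller falls back to "anywhere", as in the code below
}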

@@ -3024,16 +3033,17 @@
   }
 
   // If we got here then the metaspace got allocated.
   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
 
+#if INCLUDE_CDS
   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
     FileMapInfo::stop_sharing_and_unmap(
         "Could not allocate metaspace at a compatible address");
   }
-
+#endif
   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                   UseSharedSpaces ? (address)cds_base : 0);
 
   initialize_class_space(metaspace_rs);
 

@@ -3113,10 +3123,11 @@
   size_t cds_total = 0;
 
   MetaspaceShared::set_max_alignment(max_alignment);
 
   if (DumpSharedSpaces) {
+#if INCLUDE_CDS
     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
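
(align_size_up rounds a size up to the next multiple of a power-of-two alignment; its usual bit-twiddling form, for reference:

static inline size_t align_up(size_t size, size_t alignment) {
  // alignment must be a power of two, as HotSpot asserts
  return (size + alignment - 1) & ~(alignment - 1);
}
// e.g. align_up(10*1024*1024 + 1, 4096) == 10*1024*1024 + 4096
)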
 

@@ -3150,49 +3161,50 @@
      gclog_or_tty->print_cr("Setting narrow_klass_base to address: " PTR_FORMAT,
                              _space_list->current_virtual_space()->bottom());
     }
 
     Universe::set_narrow_klass_shift(0);
-#endif
-
+#endif // _LP64
+#endif // INCLUDE_CDS
   } else {
+#if INCLUDE_CDS
     // If using shared space, open the file that contains the shared space
     // and map in the memory before initializing the rest of metaspace (so
     // the addresses don't conflict)
     address cds_address = NULL;
     if (UseSharedSpaces) {
       FileMapInfo* mapinfo = new FileMapInfo();
-      memset(mapinfo, 0, sizeof(FileMapInfo));
 
       // Open the shared archive file, read and validate the header. If
       // initialization fails, shared spaces [UseSharedSpaces] are
       // disabled and the file is closed.
      // The spaces themselves are also mapped in now.
       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
-        FileMapInfo::set_current_info(mapinfo);
         cds_total = FileMapInfo::shared_spaces_size();
         cds_address = (address)mapinfo->region_base(0);
       } else {
         assert(!mapinfo->is_open() && !UseSharedSpaces,
                "archive file not closed or shared spaces not disabled.");
       }
     }
-
+#endif // INCLUDE_CDS
 #ifdef _LP64
     // If UseCompressedClassPointers is set then allocate the metaspace area
     // above the heap and above the CDS area (if it exists).
     if (using_class_space()) {
       if (UseSharedSpaces) {
+#if INCLUDE_CDS
         char* cds_end = (char*)(cds_address + cds_total);
         cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
+#endif
       } else {
         char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
         allocate_metaspace_compressed_klass_ptrs(base, 0);
       }
     }
-#endif
+#endif // _LP64
 
     // Initialize these before initializing the VirtualSpaceList
     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
     // Make the first class chunk bigger than a medium chunk so it's not put

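Returning to the mapinfo->initialize() / map_shared_spaces() step above: the general validate-header-then-map pattern looks roughly like the following sketch (the struct layout, magic value and region table here are assumptions for illustration, not the FileMapInfo format):

#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/mman.h>

struct Region { uint64_t base; uint64_t size; uint64_t file_offset; };
struct Header { uint32_t magic; uint32_t version; uint32_t nregions; };

// Validate the header, then map every region read-only at its recorded
// base address; any failure means the caller disables sharing and
// closes the file, as asserted above.
static bool map_archive(FILE* f, const Header* h, const Region* regions) {
  if (h->magic != 0xcafe0001u || h->version != 1) {  // assumed values
    return false;
  }
  for (uint32_t i = 0; i < h->nregions; i++) {
    void* p = mmap((void*)(uintptr_t)regions[i].base, regions[i].size,
                   PROT_READ, MAP_PRIVATE | MAP_FIXED, fileno(f),
                   (off_t)regions[i].file_offset);  // offsets assumed page-aligned
    if (p == MAP_FAILED) {
      return false;
    }
  }
  return true;
}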
@@ -3378,10 +3390,14 @@
 
 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
   assert(!SafepointSynchronize::is_at_safepoint()
          || Thread::current()->is_VM_thread(), "should be the VM thread");
 
+  if (DumpSharedSpaces && PrintSharedSpaces) {
+    record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
+  }
+
   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
 
   if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
     // Dark matter.  Too small for dictionary.
 #ifdef ASSERT

@@ -3415,12 +3431,13 @@
     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
     MetaWord* result = space->allocate(word_size, NonClassType);
     if (result == NULL) {
       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
     }
-
-    space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
+    if (PrintSharedSpaces) {
+      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
+    }
 
     // Zero initialize.
     Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
 
     return result;

@@ -3515,19 +3532,59 @@
 }
 
 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
   assert(DumpSharedSpaces, "sanity");
 
-  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
+  int byte_size = (int)word_size * HeapWordSize;
+  AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
+
   if (_alloc_record_head == NULL) {
     _alloc_record_head = _alloc_record_tail = rec;
-  } else {
+  } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
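+    // Common case during dumping: this block starts exactly where the
+    // previous one ended, so just append to the tail.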
     _alloc_record_tail->_next = rec;
     _alloc_record_tail = rec;
+  } else {
+    // slow linear search, but this doesn't happen that often, and only when dumping
+    for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
+      if (old->_ptr == ptr) {
+        assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
+        int remain_bytes = old->_byte_size - byte_size;
+        assert(remain_bytes >= 0, "sanity");
+        old->_type = type;
+
+        if (remain_bytes == 0) {
+          delete(rec);
+        } else {
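+          // Partial reuse: shrink the old record to the new size and keep
+          // the remainder as a DeallocatedType record right after it.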
+          address remain_ptr = address(ptr) + byte_size;
+          rec->_ptr = remain_ptr;
+          rec->_byte_size = remain_bytes;
+          rec->_type = MetaspaceObj::DeallocatedType;
+          rec->_next = old->_next;
+          old->_byte_size = byte_size;
+          old->_next = rec;
+        }
+        return;
+      }
+    }
+    assert(0, "reallocating a freed pointer that was not recorded");
   }
 }
 
+void Metaspace::record_deallocation(void* ptr, size_t word_size) {
+  assert(DumpSharedSpaces, "sanity");
+
+  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
+    if (rec->_ptr == ptr) {
+      assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
+      rec->_type = MetaspaceObj::DeallocatedType;
+      return;
+    }
+  }
+
+  assert(0, "deallocating a pointer that was not recorded");
+}
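
A worked example (illustrative sizes): suppose a 64-byte block is recorded at p, later passed to record_deallocation (flipping its type to DeallocatedType), and then a 16-byte allocation reuses p. record_allocation finds the old record via the linear search, restores its type with _byte_size trimmed to 16, and inserts a new DeallocatedType record covering [p+16, p+64), so the remaining 48 bytes stay accounted for.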
+
 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
 
   address last_addr = (address)bottom();