
src/share/vm/memory/metaspace.cpp

rev 7386 : 8064457: Introduce compressed oops mode disjoint base and improve compressed heap handling.


 404         MetaspaceGC::allowed_expansion() >= words) {
 405       return true;
 406     }
 407   }
 408 
 409   return false;
 410 }
 411 
 412 // bytes is the size of the associated virtual space.
 413 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 414   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 415 
 416 #if INCLUDE_CDS
 417   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
 418   // the configurable address, generally at the top of the Java heap, so
 419   // other memory addresses don't conflict.
 420   if (DumpSharedSpaces) {
 421     bool large_pages = false; // No large pages when dumping the CDS archive.
 422     char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 423 
 424     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
 425     if (_rs.is_reserved()) {
 426       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 427     } else {
 428     // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
 429       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 430     }
 431     MetaspaceShared::set_shared_rs(&_rs);
 432   } else
 433 #endif
 434   {
 435     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 436 
 437     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 438   }
 439 
 440   if (_rs.is_reserved()) {
 441     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 442     assert(_rs.size() != 0, "Catch if we get a 0 size");
 443     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
 444     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
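
The constructor above shows a common two-step reservation pattern: try to mmap
at a preferred base (SharedBaseAddress when dumping the CDS archive), and fall
back to an OS-chosen address if that fails. Below is a minimal, self-contained
illustration of the same pattern using POSIX mmap; the function name
reserve_at_or_anywhere and the PROT_NONE/MAP_NORESERVE flag choices are
assumptions made for the sketch, not taken from metaspace.cpp.

#include <stddef.h>
#include <sys/mman.h>

// Sketch only: reserve `bytes` of address space, preferring `preferred`.
static void* reserve_at_or_anywhere(void* preferred, size_t bytes) {
  // First attempt: hint the preferred address.  Without MAP_FIXED the kernel
  // may still place the mapping elsewhere, so verify the result -- compare
  // the assert(shared_base == 0 || _rs.base() == shared_base) check above.
  void* p = mmap(preferred, bytes, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p != MAP_FAILED && preferred != NULL && p != preferred) {
    munmap(p, bytes);                // landed in the wrong place: release it
    p = MAP_FAILED;
  }
  if (p == MAP_FAILED) {
    // Second attempt: anywhere, mirroring the fallback branch above.
    p = mmap(NULL, bytes, PROT_NONE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  }
  return (p == MAP_FAILED) ? NULL : p;
}

In the real code the mismatch handling lives inside ReservedSpace itself; the
sketch only makes the two-attempt structure of the call site explicit.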


3006   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3007 }
3008 #endif
3009 
3010 // Try to allocate the metaspace at the requested addr.
3011 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3012   assert(using_class_space(), "called improperly");
3013   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3014   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3015          "Metaspace size is too big");
3016   assert_is_ptr_aligned(requested_addr, _reserve_alignment);
3017   assert_is_ptr_aligned(cds_base, _reserve_alignment);
3018   assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
3019 
3020   // Don't use large pages for the class space.
3021   bool large_pages = false;
3022 
3023   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3024                                              _reserve_alignment,
3025                                              large_pages,
3026                                              requested_addr, 0);
3027   if (!metaspace_rs.is_reserved()) {
3028 #if INCLUDE_CDS
3029     if (UseSharedSpaces) {
3030       size_t increment = align_size_up(1*G, _reserve_alignment);
3031 
3032       // Keep trying to allocate the metaspace, increasing the requested_addr
3033       // by 1GB each time, until we reach an address that will no longer allow
3034       // use of CDS with compressed klass pointers.
3035       char *addr = requested_addr;
3036       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3037              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3038         addr = addr + increment;
3039         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3040                                      _reserve_alignment, large_pages, addr, 0);
3041       }
3042     }
3043 #endif
3044     // If no allocation has succeeded so far, try to allocate the space
3045     // anywhere.  If that fails too, then OOM doom.  At this point we cannot
3046     // fall back to allocating the metaspace as if UseCompressedClassPointers
3047     // were off, because too much initialization that depends on
3048     // UseCompressedClassPointers has already happened.
3049     if (!metaspace_rs.is_reserved()) {
3050       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3051                                    _reserve_alignment, large_pages);
3052       if (!metaspace_rs.is_reserved()) {
3053         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3054                                               compressed_class_space_size()));
3055       }
3056     }
3057   }
3058 
3059   // If we got here then the metaspace got allocated.
3060   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
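
The retry loop above is the core of the CDS-compatible placement strategy:
starting from requested_addr, candidates are probed upward in 1 GB steps until
a reservation succeeds, the address arithmetic would wrap, or the candidate can
no longer be encoded together with the CDS base. A self-contained sketch of
that control flow follows; the callback typedefs (fits_fn standing in for
can_use_cds_with_metaspace_addr, reserve_fn for the ReservedSpace attempt) are
assumptions made for the sketch.

#include <stddef.h>

typedef bool  (*fits_fn)(char* addr);                   // candidate still encodable?
typedef char* (*reserve_fn)(char* addr, size_t bytes);  // NULL on failure

static char* probe_upwards(char* start, size_t bytes, size_t increment,
                           fits_fn fits, reserve_fn reserve) {
  char* result = NULL;
  char* addr   = start;
  while (result == NULL &&
         addr + increment > addr &&     // wrap-around guard, as at line 3036
         fits(addr + increment)) {
    addr += increment;                  // step up by one increment (1 GB above)
    result = reserve(addr, bytes);
  }
  return result;                        // NULL if every candidate failed
}

Note that, as in the original loop, the first candidate tried is
start + increment: the initial requested_addr itself is not retried.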




 404         MetaspaceGC::allowed_expansion() >= words) {
 405       return true;
 406     }
 407   }
 408 
 409   return false;
 410 }
 411 
 412 // bytes is the size of the associated virtual space.
 413 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 414   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 415 
 416 #if INCLUDE_CDS
 417   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
 418   // the configurable address, generally at the top of the Java heap, so
 419   // other memory addresses don't conflict.
 420   if (DumpSharedSpaces) {
 421     bool large_pages = false; // No large pages when dumping the CDS archive.
 422     char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 423 
 424     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
 425     if (_rs.is_reserved()) {
 426       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 427     } else {
 428     // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
 429       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 430     }
 431     MetaspaceShared::set_shared_rs(&_rs);
 432   } else
 433 #endif
 434   {
 435     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 436 
 437     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 438   }
 439 
 440   if (_rs.is_reserved()) {
 441     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 442     assert(_rs.size() != 0, "Catch if we get a 0 size");
 443     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
 444     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());


3006   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3007 }
3008 #endif
3009 
3010 // Try to allocate the metaspace at the requested addr.
3011 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3012   assert(using_class_space(), "called improperly");
3013   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3014   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3015          "Metaspace size is too big");
3016   assert_is_ptr_aligned(requested_addr, _reserve_alignment);
3017   assert_is_ptr_aligned(cds_base, _reserve_alignment);
3018   assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
3019 
3020   // Don't use large pages for the class space.
3021   bool large_pages = false;
3022 
3023   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3024                                              _reserve_alignment,
3025                                              large_pages,
3026                                              requested_addr);
3027   if (!metaspace_rs.is_reserved()) {
3028 #if INCLUDE_CDS
3029     if (UseSharedSpaces) {
3030       size_t increment = align_size_up(1*G, _reserve_alignment);
3031 
3032       // Keep trying to allocate the metaspace, increasing the requested_addr
3033       // by 1GB each time, until we reach an address that will no longer allow
3034       // use of CDS with compressed klass pointers.
3035       char *addr = requested_addr;
3036       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3037              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3038         addr = addr + increment;
3039         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3040                                      _reserve_alignment, large_pages, addr);
3041       }
3042     }
3043 #endif
3044     // If no allocation has succeeded so far, try to allocate the space
3045     // anywhere.  If that fails too, then OOM doom.  At this point we cannot
3046     // fall back to allocating the metaspace as if UseCompressedClassPointers
3047     // were off, because too much initialization that depends on
3048     // UseCompressedClassPointers has already happened.
3049     if (!metaspace_rs.is_reserved()) {
3050       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3051                                    _reserve_alignment, large_pages);
3052       if (!metaspace_rs.is_reserved()) {
3053         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3054                                               compressed_class_space_size()));
3055       }
3056     }
3057   }
3058 
3059   // If we got here then the metaspace got allocated.
3060   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
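
For review context: the two listings differ only in the ReservedSpace
constructor calls. The first (pre-change) listing passes a trailing 0 -- the
noaccess_prefix parameter of the pre-change constructor -- at lines 424, 3026
and 3040; the second (patched) listing drops that argument, e.g.:

// first listing (old), line 424:
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
// second listing (new), line 424:
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);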

