
src/share/vm/memory/metaspace.cpp


*** 460,479 ****
  #endif

    void print_on(outputStream* st) const;
  };

! #define assert_is_ptr_aligned(ptr, alignment) \
!   assert(is_ptr_aligned(ptr, alignment), \
!          PTR_FORMAT " is not aligned to " \
!          SIZE_FORMAT, p2i(ptr), alignment)
!
! #define assert_is_size_aligned(size, alignment) \
!   assert(is_size_aligned(size, alignment), \
!          SIZE_FORMAT " is not aligned to " \
!          SIZE_FORMAT, size, alignment)
!
  // Decide if large pages should be committed when the memory is reserved.
  static bool should_commit_large_pages_when_reserving(size_t bytes) {
    if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
      size_t words = bytes / BytesPerWord;
--- 460,473 ----
  #endif

    void print_on(outputStream* st) const;
  };

! #define assert_is_aligned(value, alignment) \
!   assert(is_aligned((value), (alignment)), \
!          SIZE_FORMAT_HEX " is not aligned to " \
!          SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))

  // Decide if large pages should be committed when the memory is reserved.
  static bool should_commit_large_pages_when_reserving(size_t bytes) {
    if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
      size_t words = bytes / BytesPerWord;
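Note: this is the only non-mechanical hunk in the file. The separate pointer and size assert macros are folded into a single assert_is_aligned, which casts its argument to an integral type so one definition serves both kinds of caller. A minimal standalone sketch of that pattern (hypothetical names; not the HotSpot macro, its is_aligned() helper, or SIZE_FORMAT_HEX):

    // Standalone sketch of the unified assert pattern -- hypothetical names,
    // not HotSpot code.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Assumes alignment is a power of two, as the HotSpot helpers do.
    static inline bool is_aligned_sketch(uintptr_t value, uintptr_t alignment) {
      return (value & (alignment - 1)) == 0;
    }

    // One macro covers both pointer and size arguments via the integral cast.
    #define ASSERT_IS_ALIGNED_SKETCH(value, alignment) \
      assert(is_aligned_sketch((uintptr_t)(value), (uintptr_t)(alignment)))

    int main() {
      size_t committed_bytes = 4096;
      void*  base = reinterpret_cast<void*>(0x10000);
      ASSERT_IS_ALIGNED_SKETCH(committed_bytes, 4096); // size argument
      ASSERT_IS_ALIGNED_SKETCH(base, 4096);            // pointer argument
      printf("both alignment checks passed\n");
      return 0;
    }

The (size_t)(uintptr_t) cast is what lets the same macro accept _rs.base() (a pointer) and _rs.size() (a size) in the hunks that follow.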
*** 487,505 ****
      return false;
    }

  // byte_size is the size of the associated virtualspace.
  VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
!   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

  #if INCLUDE_CDS
    // This allocates memory with mmap. For DumpSharedspaces, try to reserve
    // configurable address, generally at the top of the Java heap so other
    // memory addresses don't conflict.
    if (DumpSharedSpaces) {
      bool large_pages = false; // No large pages when dumping the CDS archive.
!     char* shared_base = align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
      if (_rs.is_reserved()) {
        assert(shared_base == 0 || _rs.base() == shared_base, "should match");
      } else {
--- 481,499 ----
      return false;
    }

  // byte_size is the size of the associated virtualspace.
  VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
!   assert_is_aligned(bytes, Metaspace::reserve_alignment());

  #if INCLUDE_CDS
    // This allocates memory with mmap. For DumpSharedspaces, try to reserve
    // configurable address, generally at the top of the Java heap so other
    // memory addresses don't conflict.
    if (DumpSharedSpaces) {
      bool large_pages = false; // No large pages when dumping the CDS archive.
!     char* shared_base = align_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
      if (_rs.is_reserved()) {
        assert(shared_base == 0 || _rs.base() == shared_base, "should match");
      } else {
*** 520,531 ****
    }

    if (_rs.is_reserved()) {
      assert(_rs.base() != NULL, "Catch if we get a NULL address");
      assert(_rs.size() != 0, "Catch if we get a 0 size");

!     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
!     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

      MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
    }
  }
--- 514,525 ----
    }

    if (_rs.is_reserved()) {
      assert(_rs.base() != NULL, "Catch if we get a NULL address");
      assert(_rs.size() != 0, "Catch if we get a 0 size");

!     assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
!     assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());

      MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
    }
  }
*** 861,871 ****
  // words for data in metaspace. Esentially the minimum size is currently 3 words.
  size_t get_allocation_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
!   raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
--- 855,865 ----
  // words for data in metaspace. Esentially the minimum size is currently 3 words.
  size_t get_allocation_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
!   raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
*** 1066,1077 ****
    }

    // These are necessary restriction to make sure that the virtual space always
    // grows in steps of Metaspace::commit_alignment(). If both base and size are
    // aligned only the middle alignment of the VirtualSpace is used.
!   assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
!   assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

    // ReservedSpaces marked as special will have the entire memory
    // pre-committed. Setting a committed size will make sure that
    // committed_size and actual_committed_size agrees.
    size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
--- 1060,1071 ----
    }

    // These are necessary restriction to make sure that the virtual space always
    // grows in steps of Metaspace::commit_alignment(). If both base and size are
    // aligned only the middle alignment of the VirtualSpace is used.
!   assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
!   assert_is_aligned(_rs.size(), Metaspace::commit_alignment());

    // ReservedSpaces marked as special will have the entire memory
    // pre-committed. Setting a committed size will make sure that
    // committed_size and actual_committed_size agrees.
    size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
*** 1321,1331 ****
      return false;
    }

    // Reserve the space
    size_t vs_byte_size = vs_word_size * BytesPerWord;
!   assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

    // Allocate the meta virtual space and initialize it.
    VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
    if (!new_entry->initialize()) {
      delete new_entry;
--- 1315,1325 ----
      return false;
    }

    // Reserve the space
    size_t vs_byte_size = vs_word_size * BytesPerWord;
!   assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

    // Allocate the meta virtual space and initialize it.
    VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
    if (!new_entry->initialize()) {
      delete new_entry;
*** 1376,1387 ****
    return result;
  }

  bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
!   assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
!   assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
    assert(min_words <= preferred_words, "Invalid arguments");

    if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
      return false;
    }

--- 1370,1381 ----
    return result;
  }

  bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
!   assert_is_aligned(min_words, Metaspace::commit_alignment_words());
!   assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
    assert(min_words <= preferred_words, "Invalid arguments");

    if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
      return false;
    }

*** 1402,1412 ****
    }

    retire_current_virtual_space();
    // Get another virtual space.
    size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
!   grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

    if (create_new_virtual_space(grow_vs_words)) {
      if (current_virtual_space()->is_pre_committed()) {
        // The memory was pre-committed, so we are done here.
        assert(min_words <= current_virtual_space()->committed_words(),
--- 1396,1406 ----
    }

    retire_current_virtual_space();
    // Get another virtual space.
    size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
!   grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

    if (create_new_virtual_space(grow_vs_words)) {
      if (current_virtual_space()->is_pre_committed()) {
        // The memory was pre-committed, so we are done here.
        assert(min_words <= current_virtual_space()->committed_words(),
*** 1433,1444 ****
    }

    // The expand amount is currently only determined by the requested sizes
    // and not how much committed memory is left in the current virtual space.
!   size_t min_word_size = align_size_up(chunk_word_size, Metaspace::commit_alignment_words());
!   size_t preferred_word_size = align_size_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
    if (min_word_size >= preferred_word_size) {
      // Can happen when humongous chunks are allocated.
      preferred_word_size = min_word_size;
    }

--- 1427,1438 ----
    }

    // The expand amount is currently only determined by the requested sizes
    // and not how much committed memory is left in the current virtual space.
!   size_t min_word_size = align_up(chunk_word_size, Metaspace::commit_alignment_words());
!   size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
    if (min_word_size >= preferred_word_size) {
      // Can happen when humongous chunks are allocated.
      preferred_word_size = min_word_size;
    }

*** 1486,1496 ****
  // If that is still not enough, expand by the size of the allocation
  // plus some.
  size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
    size_t min_delta = MinMetaspaceExpansion;
    size_t max_delta = MaxMetaspaceExpansion;
!   size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

    if (delta <= min_delta) {
      delta = min_delta;
    } else if (delta <= max_delta) {
      // Don't want to hit the high water mark on the next
--- 1480,1490 ----
  // If that is still not enough, expand by the size of the allocation
  // plus some.
  size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
    size_t min_delta = MinMetaspaceExpansion;
    size_t max_delta = MaxMetaspaceExpansion;
!   size_t delta = align_up(bytes, Metaspace::commit_alignment());

    if (delta <= min_delta) {
      delta = min_delta;
    } else if (delta <= max_delta) {
      // Don't want to hit the high water mark on the next
*** 1501,1511 ****
      // This allocation is large but the next ones are probably not
      // so increase by the minimum.
      delta = delta + min_delta;
    }

!   assert_is_size_aligned(delta, Metaspace::commit_alignment());

    return delta;
  }

  size_t MetaspaceGC::capacity_until_GC() {
--- 1495,1505 ----
      // This allocation is large but the next ones are probably not
      // so increase by the minimum.
      delta = delta + min_delta;
    }

!   assert_is_aligned(delta, Metaspace::commit_alignment());

    return delta;
  }

  size_t MetaspaceGC::capacity_until_GC() {
*** 1513,1530 ****
    assert(value >= MetaspaceSize, "Not initialized properly?");
    return value;
  }

  bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
!   assert_is_size_aligned(v, Metaspace::commit_alignment());

    size_t capacity_until_GC = (size_t) _capacity_until_GC;
    size_t new_value = capacity_until_GC + v;

    if (new_value < capacity_until_GC) {
      // The addition wrapped around, set new_value to aligned max value.
!     new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
    }

    intptr_t expected = (intptr_t) capacity_until_GC;
    intptr_t actual =
      Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
--- 1507,1524 ----
    assert(value >= MetaspaceSize, "Not initialized properly?");
    return value;
  }

  bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
!   assert_is_aligned(v, Metaspace::commit_alignment());

    size_t capacity_until_GC = (size_t) _capacity_until_GC;
    size_t new_value = capacity_until_GC + v;

    if (new_value < capacity_until_GC) {
      // The addition wrapped around, set new_value to aligned max value.
!     new_value = align_down(max_uintx, Metaspace::commit_alignment());
    }

    intptr_t expected = (intptr_t) capacity_until_GC;
    intptr_t actual =
      Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
*** 1540,1550 ****
    }
    return true;
  }

  size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
!   assert_is_size_aligned(v, Metaspace::commit_alignment());

    return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
  }

  void MetaspaceGC::initialize() {
--- 1534,1544 ----
    }
    return true;
  }

  size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
!   assert_is_aligned(v, Metaspace::commit_alignment());

    return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
  }

  void MetaspaceGC::initialize() {
*** 1626,1636 ****
    size_t shrink_bytes = 0;
    if (capacity_until_GC < minimum_desired_capacity) {
      // If we have less capacity below the metaspace HWM, then
      // increment the HWM.
      size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
!     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
      // Don't expand unless it's significant
      if (expand_bytes >= MinMetaspaceExpansion) {
        size_t new_capacity_until_GC = 0;
        bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
        assert(succeeded, "Should always succesfully increment HWM when at safepoint");
--- 1620,1630 ----
    size_t shrink_bytes = 0;
    if (capacity_until_GC < minimum_desired_capacity) {
      // If we have less capacity below the metaspace HWM, then
      // increment the HWM.
      size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
!     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
      // Don't expand unless it's significant
      if (expand_bytes >= MinMetaspaceExpansion) {
        size_t new_capacity_until_GC = 0;
        bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
        assert(succeeded, "Should always succesfully increment HWM when at safepoint");
*** 1679,1689 ****
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call. But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

!     shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
--- 1673,1683 ----
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call. But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

!     shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
*** 2238,2248 ****
      // Might still need a humongous chunk. Enforce
      // humongous allocations sizes to be aligned up to
      // the smallest chunk size.
      size_t if_humongous_sized_chunk =
!       align_size_up(word_size + Metachunk::overhead(), smallest_chunk_size());
      chunk_word_size = MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);

      assert(!SpaceManager::is_humongous(word_size) ||
--- 2232,2242 ----
      // Might still need a humongous chunk. Enforce
      // humongous allocations sizes to be aligned up to
      // the smallest chunk size.
      size_t if_humongous_sized_chunk =
!       align_up(word_size + Metachunk::overhead(), smallest_chunk_size());
      chunk_word_size = MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);

      assert(!SpaceManager::is_humongous(word_size) ||
*** 3097,3109 ****
  void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
    assert(using_class_space(), "called improperly");
    assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
    assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
           "Metaspace size is too big");
!   assert_is_ptr_aligned(requested_addr, _reserve_alignment);
!   assert_is_ptr_aligned(cds_base, _reserve_alignment);
!   assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);

    // Don't use large pages for the class space.
    bool large_pages = false;

  #if !(defined(AARCH64) || defined(AIX))
--- 3091,3103 ----
  void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
    assert(using_class_space(), "called improperly");
    assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
    assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
           "Metaspace size is too big");
!   assert_is_aligned(requested_addr, _reserve_alignment);
!   assert_is_aligned(cds_base, _reserve_alignment);
!   assert_is_aligned(compressed_class_space_size(), _reserve_alignment);

    // Don't use large pages for the class space.
    bool large_pages = false;

  #if !(defined(AARCH64) || defined(AIX))
*** 3128,3138 ****
    // klass with a single MOVK instruction. We can do this iff the
    // compressed class base is a multiple of 4G.
    // Aix: Search for a place where we can find memory. If we need to load
    // the base, 4G alignment is helpful, too.
    size_t increment = AARCH64_ONLY(4*)G;
!   for (char *a = align_ptr_up(requested_addr, increment);
         a < (char*)(1024*G);
         a += increment) {
      if (a == (char *)(32*G)) {
        // Go faster from here on. Zero-based is no longer possible.
        increment = 4*G;
--- 3122,3132 ----
    // klass with a single MOVK instruction. We can do this iff the
    // compressed class base is a multiple of 4G.
    // Aix: Search for a place where we can find memory. If we need to load
    // the base, 4G alignment is helpful, too.
    size_t increment = AARCH64_ONLY(4*)G;
!   for (char *a = align_up(requested_addr, increment);
         a < (char*)(1024*G);
         a += increment) {
      if (a == (char *)(32*G)) {
        // Go faster from here on. Zero-based is no longer possible.
        increment = 4*G;
*** 3163,3173 ****
  #endif // AARCH64

    if (!metaspace_rs.is_reserved()) {
  #if INCLUDE_CDS
      if (UseSharedSpaces) {
!       size_t increment = align_size_up(1*G, _reserve_alignment);

        // Keep trying to allocate the metaspace, increasing the requested_addr
        // by 1GB each time, until we reach an address that will no longer allow
        // use of CDS with compressed klass pointers.
        char *addr = requested_addr;
--- 3157,3167 ----
  #endif // AARCH64

    if (!metaspace_rs.is_reserved()) {
  #if INCLUDE_CDS
      if (UseSharedSpaces) {
!       size_t increment = align_up(1*G, _reserve_alignment);

        // Keep trying to allocate the metaspace, increasing the requested_addr
        // by 1GB each time, until we reach an address that will no longer allow
        // use of CDS with compressed klass pointers.
        char *addr = requested_addr;
*** 3267,3290 ****
    // java.lang.management.MemoryUsage API.
    //
    // Ideally, we would be able to set the default value of MaxMetaspaceSize in
    // globals.hpp to the aligned value, but this is not possible, since the
    // alignment depends on other flags being parsed.
!   MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);

    if (MetaspaceSize > MaxMetaspaceSize) {
      MetaspaceSize = MaxMetaspaceSize;
    }

!   MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);

    assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

!   MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
!   MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

!   CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
    set_compressed_class_space_size(CompressedClassSpaceSize);
  }

  void Metaspace::global_initialize() {
    MetaspaceGC::initialize();
--- 3261,3284 ----
    // java.lang.management.MemoryUsage API.
    //
    // Ideally, we would be able to set the default value of MaxMetaspaceSize in
    // globals.hpp to the aligned value, but this is not possible, since the
    // alignment depends on other flags being parsed.
!   MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);

    if (MetaspaceSize > MaxMetaspaceSize) {
      MetaspaceSize = MaxMetaspaceSize;
    }

!   MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);

    assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

!   MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
!   MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

!   CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
    set_compressed_class_space_size(CompressedClassSpaceSize);
  }

  void Metaspace::global_initialize() {
    MetaspaceGC::initialize();
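Note: the flag clamping above uses the *_bounded variant. Assuming the bounded helper falls back to the alignment itself when plain rounding down would reach zero (so a tiny flag value cannot collapse to nothing), a standalone sketch of that behaviour (hypothetical names, not the HotSpot implementation):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    static inline size_t align_down_sketch(size_t value, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0); // power of two expected
      return value & ~(alignment - 1);
    }

    // Assumed semantics of the bounded variant: never return less than one
    // alignment unit.
    static inline size_t align_down_bounded_sketch(size_t value, size_t alignment) {
      size_t aligned = align_down_sketch(value, alignment);
      return aligned > 0 ? aligned : alignment;
    }

    int main() {
      const size_t commit_alignment = 64 * 1024; // hypothetical granularity
      printf("%zu\n", align_down_bounded_sketch(100 * 1024, commit_alignment)); // 65536
      printf("%zu\n", align_down_bounded_sketch(1024, commit_alignment));       // clamped to 65536
      return 0;
    }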
*** 3297,3316 ****
    if (DumpSharedSpaces) {
  #if INCLUDE_CDS
      MetaspaceShared::estimate_regions_size();

!     SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
!     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
!     SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
!     SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);

      // Initialize with the sum of the shared space sizes. The read-only
      // and read write metaspace chunks will be allocated out of this and the
      // remainder is the misc code and data chunks.
      cds_total = FileMapInfo::shared_spaces_size();
!     cds_total = align_size_up(cds_total, _reserve_alignment);
      _space_list = new VirtualSpaceList(cds_total/wordSize);
      _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

      if (!_space_list->initialization_succeeded()) {
        vm_exit_during_initialization("Unable to dump shared archive.", NULL);
--- 3291,3310 ----
    if (DumpSharedSpaces) {
  #if INCLUDE_CDS
      MetaspaceShared::estimate_regions_size();

!     SharedReadOnlySize = align_up(SharedReadOnlySize, max_alignment);
!     SharedReadWriteSize = align_up(SharedReadWriteSize, max_alignment);
!     SharedMiscDataSize = align_up(SharedMiscDataSize, max_alignment);
!     SharedMiscCodeSize = align_up(SharedMiscCodeSize, max_alignment);

      // Initialize with the sum of the shared space sizes. The read-only
      // and read write metaspace chunks will be allocated out of this and the
      // remainder is the misc code and data chunks.
      cds_total = FileMapInfo::shared_spaces_size();
!     cds_total = align_up(cds_total, _reserve_alignment);
      _space_list = new VirtualSpaceList(cds_total/wordSize);
      _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

      if (!_space_list->initialization_succeeded()) {
        vm_exit_during_initialization("Unable to dump shared archive.", NULL);
*** 3353,3363 ****
        cds_total = FileMapInfo::shared_spaces_size();
        cds_address = (address)mapinfo->header()->region_addr(0);
  #ifdef _LP64
        if (using_class_space()) {
          char* cds_end = (char*)(cds_address + cds_total);
!         cds_end = align_ptr_up(cds_end, _reserve_alignment);
          // If UseCompressedClassPointers is set then allocate the metaspace area
          // above the heap and above the CDS area (if it exists).
          allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
          // Map the shared string space after compressed pointers
          // because it relies on compressed class pointers setting to work
--- 3347,3357 ----
        cds_total = FileMapInfo::shared_spaces_size();
        cds_address = (address)mapinfo->header()->region_addr(0);
  #ifdef _LP64
        if (using_class_space()) {
          char* cds_end = (char*)(cds_address + cds_total);
!         cds_end = align_up(cds_end, _reserve_alignment);
          // If UseCompressedClassPointers is set then allocate the metaspace area
          // above the heap and above the CDS area (if it exists).
          allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
          // Map the shared string space after compressed pointers
          // because it relies on compressed class pointers setting to work
*** 3371,3381 ****
    }
  #endif // INCLUDE_CDS

  #ifdef _LP64
    if (!UseSharedSpaces && using_class_space()) {
!     char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
      allocate_metaspace_compressed_klass_ptrs(base, 0);
    }
  #endif // _LP64

    // Initialize these before initializing the VirtualSpaceList
--- 3365,3375 ----
    }
  #endif // INCLUDE_CDS

  #ifdef _LP64
    if (!UseSharedSpaces && using_class_space()) {
!     char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
      allocate_metaspace_compressed_klass_ptrs(base, 0);
    }
  #endif // _LP64

    // Initialize these before initializing the VirtualSpaceList
*** 3388,3398 ****
                                           (CompressedClassSpaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
!   word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());

    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

--- 3382,3392 ----
                                           (CompressedClassSpaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
!   word_size = align_up(word_size, Metaspace::reserve_alignment_words());

    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

*** 3470,3480 ****
    _alloc_record_tail = NULL;
  }

  size_t Metaspace::align_word_size_up(size_t word_size) {
    size_t byte_size = word_size * wordSize;
!   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
  }

  MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
    // DumpSharedSpaces doesn't use class metadata area (yet)
    // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
--- 3464,3474 ----
    _alloc_record_tail = NULL;
  }

  size_t Metaspace::align_word_size_up(size_t word_size) {
    size_t byte_size = word_size * wordSize;
!   return ReservedSpace::allocation_align_up(byte_size) / wordSize;
  }

  MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
    // DumpSharedSpaces doesn't use class metadata area (yet)
    // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
*** 4141,4151 ****
      const int rand = os::random() % 4;
      if (rand < 3) {
        return sizes[rand];
      } else {
        // Note: this affects the max. size of space (see _vsn initialization in ctor).
!       return align_size_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
      }
    }

    // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
    // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
--- 4135,4145 ----
      const int rand = os::random() % 4;
      if (rand < 3) {
        return sizes[rand];
      } else {
        // Note: this affects the max. size of space (see _vsn initialization in ctor).
!       return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
      }
    }

    // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
    // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
*** 4288,4298 ****
    }

  public:

    ChunkManagerReturnTestImpl()
!     : _vsn(align_size_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
      , _cm(SpecializedChunk, SmallChunk, MediumChunk)
      , _chunks_in_chunkmanager(0)
      , _words_in_chunkmanager(0)
    {
      MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
--- 4282,4292 ----
    }

  public:

    ChunkManagerReturnTestImpl()
!     : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
      , _cm(SpecializedChunk, SmallChunk, MediumChunk)
      , _chunks_in_chunkmanager(0)
      , _words_in_chunkmanager(0)
    {
      MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
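Note: apart from the macro hunk at the top of the file, the patch is a mechanical rename: align_size_up and align_ptr_up become align_up, align_size_down becomes align_down, align_size_down_bounded becomes align_down_bounded, ReservedSpace::allocation_align_size_up becomes allocation_align_up, and assert_is_ptr_aligned/assert_is_size_aligned become assert_is_aligned. For reference, a standalone sketch of what the round-up helper computes for power-of-two alignments (hypothetical names, not the HotSpot code):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Round value up to the next multiple of a power-of-two alignment.
    static inline size_t align_up_sketch(size_t value, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0); // power of two expected
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t commit_alignment = 64 * 1024; // hypothetical commit granularity
      size_t bytes = 100 * 1024;
      // Mirrors e.g. "delta = align_up(bytes, Metaspace::commit_alignment())" above.
      printf("align_up(%zu, %zu) = %zu\n", bytes, commit_alignment,
             align_up_sketch(bytes, commit_alignment)); // prints 131072
      return 0;
    }

The round-down helper is the same computation without the "+ alignment - 1" adjustment, as sketched after the flag-clamping hunk above.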