--- old/src/hotspot/share/memory/metaspace.cpp 2019-07-22 11:08:01.849652220 +0200 +++ new/src/hotspot/share/memory/metaspace.cpp 2019-07-22 11:08:01.637649948 +0200 @@ -31,8 +31,8 @@ #include "memory/filemap.hpp" #include "memory/metaspace.hpp" #include "memory/metaspace/chunkManager.hpp" +#include "memory/metaspace/chunkLevel.hpp" #include "memory/metaspace/metachunk.hpp" -#include "memory/metaspace/metaspaceCommon.hpp" #include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp" #include "memory/metaspace/spaceManager.hpp" #include "memory/metaspace/virtualSpaceList.hpp" @@ -59,298 +59,8 @@ DEBUG_ONLY(bool Metaspace::_frozen = false;) -static const char* space_type_name(Metaspace::MetaspaceType t) { - const char* s = NULL; - switch (t) { - case Metaspace::StandardMetaspaceType: s = "Standard"; break; - case Metaspace::BootMetaspaceType: s = "Boot"; break; - case Metaspace::UnsafeAnonymousMetaspaceType: s = "UnsafeAnonymous"; break; - case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break; - default: ShouldNotReachHere(); - } - return s; -} - -volatile size_t MetaspaceGC::_capacity_until_GC = 0; -uint MetaspaceGC::_shrink_factor = 0; -bool MetaspaceGC::_should_concurrent_collect = false; - -// BlockFreelist methods - -// VirtualSpaceNode methods - -// MetaspaceGC methods - -// VM_CollectForMetadataAllocation is the vm operation used to GC. -// Within the VM operation after the GC the attempt to allocate the metadata -// should succeed. If the GC did not free enough space for the metaspace -// allocation, the HWM is increased so that another virtualspace will be -// allocated for the metadata. With perm gen the increase in the perm -// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The -// metaspace policy uses those as the small and large steps for the HWM. -// -// After the GC the compute_new_size() for MetaspaceGC is called to -// resize the capacity of the metaspaces. The current implementation -// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used -// to resize the Java heap by some GC's. New flags can be implemented -// if really needed. MinMetaspaceFreeRatio is used to calculate how much -// free space is desirable in the metaspace capacity to decide how much -// to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much -// free space is desirable in the metaspace capacity before decreasing -// the HWM. - -// Calculate the amount to increase the high water mark (HWM). -// Increase by a minimum amount (MinMetaspaceExpansion) so that -// another expansion is not requested too soon. If that is not -// enough to satisfy the allocation, increase by MaxMetaspaceExpansion. -// If that is still not enough, expand by the size of the allocation -// plus some. -size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) { - size_t min_delta = MinMetaspaceExpansion; - size_t max_delta = MaxMetaspaceExpansion; - size_t delta = align_up(bytes, Metaspace::commit_alignment()); - - if (delta <= min_delta) { - delta = min_delta; - } else if (delta <= max_delta) { - // Don't want to hit the high water mark on the next - // allocation so make the delta greater than just enough - // for this allocation. - delta = max_delta; - } else { - // This allocation is large but the next ones are probably not - // so increase by the minimum. 
- delta = delta + min_delta; - } - - assert_is_aligned(delta, Metaspace::commit_alignment()); - - return delta; -} - -size_t MetaspaceGC::capacity_until_GC() { - size_t value = OrderAccess::load_acquire(&_capacity_until_GC); - assert(value >= MetaspaceSize, "Not initialized properly?"); - return value; -} -// Try to increase the _capacity_until_GC limit counter by v bytes. -// Returns true if it succeeded. It may fail if either another thread -// concurrently increased the limit or the new limit would be larger -// than MaxMetaspaceSize. -// On success, optionally returns new and old metaspace capacity in -// new_cap_until_GC and old_cap_until_GC respectively. -// On error, optionally sets can_retry to indicate whether if there is -// actually enough space remaining to satisfy the request. -bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) { - assert_is_aligned(v, Metaspace::commit_alignment()); - size_t old_capacity_until_GC = _capacity_until_GC; - size_t new_value = old_capacity_until_GC + v; - - if (new_value < old_capacity_until_GC) { - // The addition wrapped around, set new_value to aligned max value. - new_value = align_down(max_uintx, Metaspace::commit_alignment()); - } - - if (new_value > MaxMetaspaceSize) { - if (can_retry != NULL) { - *can_retry = false; - } - return false; - } - - if (can_retry != NULL) { - *can_retry = true; - } - size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC); - - if (old_capacity_until_GC != prev_value) { - return false; - } - - if (new_cap_until_GC != NULL) { - *new_cap_until_GC = new_value; - } - if (old_cap_until_GC != NULL) { - *old_cap_until_GC = old_capacity_until_GC; - } - return true; -} - -size_t MetaspaceGC::dec_capacity_until_GC(size_t v) { - assert_is_aligned(v, Metaspace::commit_alignment()); - - return Atomic::sub(v, &_capacity_until_GC); -} - -void MetaspaceGC::initialize() { - // Set the high-water mark to MaxMetapaceSize during VM initializaton since - // we can't do a GC during initialization. - _capacity_until_GC = MaxMetaspaceSize; -} - -void MetaspaceGC::post_initialize() { - // Reset the high-water mark once the VM initialization is done. - _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize); -} - -bool MetaspaceGC::can_expand(size_t word_size, bool is_class) { - // Check if the compressed class space is full. - if (is_class && Metaspace::using_class_space()) { - size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType); - if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) { - log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)", - (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord)); - return false; - } - } - - // Check if the user has imposed a limit on the metaspace memory. - size_t committed_bytes = MetaspaceUtils::committed_bytes(); - if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) { - log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)", - (is_class ? 
"class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord)); - return false; - } - - return true; -} - -size_t MetaspaceGC::allowed_expansion() { - size_t committed_bytes = MetaspaceUtils::committed_bytes(); - size_t capacity_until_gc = capacity_until_GC(); - - assert(capacity_until_gc >= committed_bytes, - "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT, - capacity_until_gc, committed_bytes); - - size_t left_until_max = MaxMetaspaceSize - committed_bytes; - size_t left_until_GC = capacity_until_gc - committed_bytes; - size_t left_to_commit = MIN2(left_until_GC, left_until_max); - log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT - " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".", - left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord); - - return left_to_commit / BytesPerWord; -} - -void MetaspaceGC::compute_new_size() { - assert(_shrink_factor <= 100, "invalid shrink factor"); - uint current_shrink_factor = _shrink_factor; - _shrink_factor = 0; - - // Using committed_bytes() for used_after_gc is an overestimation, since the - // chunk free lists are included in committed_bytes() and the memory in an - // un-fragmented chunk free list is available for future allocations. - // However, if the chunk free lists becomes fragmented, then the memory may - // not be available for future allocations and the memory is therefore "in use". - // Including the chunk free lists in the definition of "in use" is therefore - // necessary. Not including the chunk free lists can cause capacity_until_GC to - // shrink below committed_bytes() and this has caused serious bugs in the past. - const size_t used_after_gc = MetaspaceUtils::committed_bytes(); - const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); - - const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0; - const double maximum_used_percentage = 1.0 - minimum_free_percentage; - - const double min_tmp = used_after_gc / maximum_used_percentage; - size_t minimum_desired_capacity = - (size_t)MIN2(min_tmp, double(MaxMetaspaceSize)); - // Don't shrink less than the initial generation size - minimum_desired_capacity = MAX2(minimum_desired_capacity, - MetaspaceSize); - - log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: "); - log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f", - minimum_free_percentage, maximum_used_percentage); - log_trace(gc, metaspace)(" used_after_gc : %6.1fKB", used_after_gc / (double) K); - - - size_t shrink_bytes = 0; - if (capacity_until_GC < minimum_desired_capacity) { - // If we have less capacity below the metaspace HWM, then - // increment the HWM. 
- size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; - expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment()); - // Don't expand unless it's significant - if (expand_bytes >= MinMetaspaceExpansion) { - size_t new_capacity_until_GC = 0; - bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC); - assert(succeeded, "Should always succesfully increment HWM when at safepoint"); - - Metaspace::tracer()->report_gc_threshold(capacity_until_GC, - new_capacity_until_GC, - MetaspaceGCThresholdUpdater::ComputeNewSize); - log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB", - minimum_desired_capacity / (double) K, - expand_bytes / (double) K, - MinMetaspaceExpansion / (double) K, - new_capacity_until_GC / (double) K); - } - return; - } - - // No expansion, now see if we want to shrink - // We would never want to shrink more than this - assert(capacity_until_GC >= minimum_desired_capacity, - SIZE_FORMAT " >= " SIZE_FORMAT, - capacity_until_GC, minimum_desired_capacity); - size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity; - - // Should shrinking be considered? - if (MaxMetaspaceFreeRatio < 100) { - const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0; - const double minimum_used_percentage = 1.0 - maximum_free_percentage; - const double max_tmp = used_after_gc / minimum_used_percentage; - size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize)); - maximum_desired_capacity = MAX2(maximum_desired_capacity, - MetaspaceSize); - log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f", - maximum_free_percentage, minimum_used_percentage); - log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB", - minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K); - - assert(minimum_desired_capacity <= maximum_desired_capacity, - "sanity check"); - - if (capacity_until_GC > maximum_desired_capacity) { - // Capacity too large, compute shrinking size - shrink_bytes = capacity_until_GC - maximum_desired_capacity; - // We don't want shrink all the way back to initSize if people call - // System.gc(), because some programs do that between "phases" and then - // we'd just have to grow the heap up again for the next phase. So we - // damp the shrinking: 0% on the first call, 10% on the second call, 40% - // on the third call, and 100% by the fourth call. But if we recompute - // size without shrinking, it goes back to 0%. 
- shrink_bytes = shrink_bytes / 100 * current_shrink_factor; - - shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment()); - - assert(shrink_bytes <= max_shrink_bytes, - "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, - shrink_bytes, max_shrink_bytes); - if (current_shrink_factor == 0) { - _shrink_factor = 10; - } else { - _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); - } - log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK", - MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); - log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK", - shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); - } - } - - // Don't shrink unless it's significant - if (shrink_bytes >= MinMetaspaceExpansion && - ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { - size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); - Metaspace::tracer()->report_gc_threshold(capacity_until_GC, - new_capacity_until_GC, - MetaspaceGCThresholdUpdater::ComputeNewSize); - } -} // MetaspaceUtils size_t MetaspaceUtils::_capacity_words [Metaspace:: MetadataTypeCount] = {0, 0}; @@ -531,364 +241,6 @@ } -void MetaspaceUtils::print_vs(outputStream* out, size_t scale) { - const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord); - const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord); - { - if (Metaspace::using_class_space()) { - out->print(" Non-class space: "); - } - print_scaled_words(out, reserved_nonclass_words, scale, 7); - out->print(" reserved, "); - print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7); - out->print_cr(" committed "); - - if (Metaspace::using_class_space()) { - const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord); - const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord); - out->print(" Class space: "); - print_scaled_words(out, reserved_class_words, scale, 7); - out->print(" reserved, "); - print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7); - out->print_cr(" committed "); - - const size_t reserved_words = reserved_nonclass_words + reserved_class_words; - const size_t committed_words = committed_nonclass_words + committed_class_words; - out->print(" Both: "); - print_scaled_words(out, reserved_words, scale, 7); - out->print(" reserved, "); - print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7); - out->print_cr(" committed "); - } - } -} - -static void print_basic_switches(outputStream* out, size_t scale) { - out->print("MaxMetaspaceSize: "); - if (MaxMetaspaceSize >= (max_uintx) - (2 * os::vm_page_size())) { - // aka "very big". Default is max_uintx, but due to rounding in arg parsing the real - // value is smaller. - out->print("unlimited"); - } else { - print_human_readable_size(out, MaxMetaspaceSize, scale); - } - out->cr(); - if (Metaspace::using_class_space()) { - out->print("CompressedClassSpaceSize: "); - print_human_readable_size(out, CompressedClassSpaceSize, scale); - } - out->cr(); -} - -// This will print out a basic metaspace usage report but -// unlike print_report() is guaranteed not to lock or to walk the CLDG. 
-void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) { - - if (!Metaspace::initialized()) { - out->print_cr("Metaspace not yet initialized."); - return; - } - - out->cr(); - out->print_cr("Usage:"); - - if (Metaspace::using_class_space()) { - out->print(" Non-class: "); - } - - // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from - // MetaspaceUtils. - const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType); - const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType); - const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType); - const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc; - - print_scaled_words(out, cap_nc, scale, 5); - out->print(" capacity, "); - print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5); - out->print(" used, "); - print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5); - out->print(" free+waste, "); - print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5); - out->print(" overhead. "); - out->cr(); - - if (Metaspace::using_class_space()) { - const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType); - const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType); - const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType); - const size_t free_and_waste_c = cap_c - overhead_c - used_c; - out->print(" Class: "); - print_scaled_words(out, cap_c, scale, 5); - out->print(" capacity, "); - print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5); - out->print(" used, "); - print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5); - out->print(" free+waste, "); - print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5); - out->print(" overhead. "); - out->cr(); - - out->print(" Both: "); - const size_t cap = cap_nc + cap_c; - - print_scaled_words(out, cap, scale, 5); - out->print(" capacity, "); - print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5); - out->print(" used, "); - print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5); - out->print(" free+waste, "); - print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5); - out->print(" overhead. 
"); - out->cr(); - } - - out->cr(); - out->print_cr("Virtual space:"); - - print_vs(out, scale); - - out->cr(); - out->print_cr("Chunk freelists:"); - - if (Metaspace::using_class_space()) { - out->print(" Non-Class: "); - } - print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale); - out->cr(); - if (Metaspace::using_class_space()) { - out->print(" Class: "); - print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes(), scale); - out->cr(); - out->print(" Both: "); - print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes() + - Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale); - out->cr(); - } - - out->cr(); - - // Print basic settings - print_basic_switches(out, scale); - - out->cr(); - -} - -void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) { - - if (!Metaspace::initialized()) { - out->print_cr("Metaspace not yet initialized."); - return; - } - - const bool print_loaders = (flags & rf_show_loaders) > 0; - const bool print_classes = (flags & rf_show_classes) > 0; - const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0; - const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0; - - // Some report options require walking the class loader data graph. - PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_classes, print_by_chunktype); - if (print_loaders) { - out->cr(); - out->print_cr("Usage per loader:"); - out->cr(); - } - - ClassLoaderDataGraph::loaded_cld_do(&cl); // collect data and optionally print - - // Print totals, broken up by space type. - if (print_by_spacetype) { - out->cr(); - out->print_cr("Usage per space type:"); - out->cr(); - for (int space_type = (int)Metaspace::ZeroMetaspaceType; - space_type < (int)Metaspace::MetaspaceTypeCount; space_type ++) - { - uintx num_loaders = cl._num_loaders_by_spacetype[space_type]; - uintx num_classes = cl._num_classes_by_spacetype[space_type]; - out->print("%s - " UINTX_FORMAT " %s", - space_type_name((Metaspace::MetaspaceType)space_type), - num_loaders, loaders_plural(num_loaders)); - if (num_classes > 0) { - out->print(", "); - print_number_of_classes(out, num_classes, cl._num_classes_shared_by_spacetype[space_type]); - out->print(":"); - cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype); - } else { - out->print("."); - out->cr(); - } - out->cr(); - } - } - - // Print totals for in-use data: - out->cr(); - { - uintx num_loaders = cl._num_loaders; - out->print("Total Usage - " UINTX_FORMAT " %s, ", - num_loaders, loaders_plural(num_loaders)); - print_number_of_classes(out, cl._num_classes, cl._num_classes_shared); - out->print(":"); - cl._stats_total.print_on(out, scale, print_by_chunktype); - out->cr(); - } - - // -- Print Virtual space. - out->cr(); - out->print_cr("Virtual space:"); - - print_vs(out, scale); - - // -- Print VirtualSpaceList details. - if ((flags & rf_show_vslist) > 0) { - out->cr(); - out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : ""); - - if (Metaspace::using_class_space()) { - out->print_cr(" Non-Class:"); - } - Metaspace::space_list()->print_on(out, scale); - if (Metaspace::using_class_space()) { - out->print_cr(" Class:"); - Metaspace::class_space_list()->print_on(out, scale); - } - } - out->cr(); - - // -- Print VirtualSpaceList map. 
- if ((flags & rf_show_vsmap) > 0) { - out->cr(); - out->print_cr("Virtual space map:"); - - if (Metaspace::using_class_space()) { - out->print_cr(" Non-Class:"); - } - Metaspace::space_list()->print_map(out); - if (Metaspace::using_class_space()) { - out->print_cr(" Class:"); - Metaspace::class_space_list()->print_map(out); - } - } - out->cr(); - - // -- Print Freelists (ChunkManager) details - out->cr(); - out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : ""); - - ChunkManagerStatistics non_class_cm_stat; - Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat); - - if (Metaspace::using_class_space()) { - out->print_cr(" Non-Class:"); - } - non_class_cm_stat.print_on(out, scale); - - if (Metaspace::using_class_space()) { - ChunkManagerStatistics class_cm_stat; - Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat); - out->print_cr(" Class:"); - class_cm_stat.print_on(out, scale); - } - - // As a convenience, print a summary of common waste. - out->cr(); - out->print("Waste "); - // For all wastages, print percentages from total. As total use the total size of memory committed for metaspace. - const size_t committed_words = committed_bytes() / BytesPerWord; - - out->print("(percentages refer to total committed size "); - print_scaled_words(out, committed_words, scale); - out->print_cr("):"); - - // Print space committed but not yet used by any class loader - const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord; - out->print(" Committed unused: "); - print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6); - out->cr(); - - // Print waste for in-use chunks. - UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals(); - UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals(); - UsedChunksStatistics ucs_all; - ucs_all.add(ucs_nonclass); - ucs_all.add(ucs_class); - - out->print(" Waste in chunks in use: "); - print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6); - out->cr(); - out->print(" Free in chunks in use: "); - print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6); - out->cr(); - out->print(" Overhead in chunks in use: "); - print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6); - out->cr(); - - // Print waste in free chunks. - const size_t total_capacity_in_free_chunks = - Metaspace::chunk_manager_metadata()->free_chunks_total_words() + - (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0); - out->print(" In free chunks: "); - print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6); - out->cr(); - - // Print waste in deallocated blocks. - const uintx free_blocks_num = - cl._stats_total.nonclass_sm_stats().free_blocks_num() + - cl._stats_total.class_sm_stats().free_blocks_num(); - const size_t free_blocks_cap_words = - cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() + - cl._stats_total.class_sm_stats().free_blocks_cap_words(); - out->print("Deallocated from chunks in use: "); - print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6); - out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num); - out->cr(); - - // Print total waste. 
- const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks - + free_blocks_cap_words + unused_words_in_vs; - out->print(" -total-: "); - print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6); - out->cr(); - - // Print internal statistics -#ifdef ASSERT - out->cr(); - out->cr(); - out->print_cr("Internal statistics:"); - out->cr(); - out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs); - out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births); - out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths); - out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created); - out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged); - out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded); - out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs); - out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks); - out->print_cr("Number of chunks added to freelist: " UINTX_FORMAT ".", - g_internal_statistics.num_chunks_added_to_freelist); - out->print_cr("Number of chunks removed from freelist: " UINTX_FORMAT ".", - g_internal_statistics.num_chunks_removed_from_freelist); - out->print_cr("Number of chunk merges: " UINTX_FORMAT ", split-ups: " UINTX_FORMAT ".", - g_internal_statistics.num_chunk_merges, g_internal_statistics.num_chunk_splits); - - out->cr(); -#endif - - // Print some interesting settings - out->cr(); - out->cr(); - print_basic_switches(out, scale); - - out->cr(); - out->print("InitialBootClassLoaderMetaspaceSize: "); - print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale); - - out->cr(); - out->cr(); - -} // MetaspaceUtils::print_report() // Prints an ASCII representation of the given space. void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) { @@ -957,12 +309,6 @@ // Metaspace methods -size_t Metaspace::_first_chunk_word_size = 0; -size_t Metaspace::_first_class_chunk_word_size = 0; - -size_t Metaspace::_commit_alignment = 0; -size_t Metaspace::_reserve_alignment = 0; - VirtualSpaceList* Metaspace::_space_list = NULL; VirtualSpaceList* Metaspace::_class_space_list = NULL; @@ -1178,12 +524,9 @@ assert(rs.size() >= CompressedClassSpaceSize, SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize); assert(using_class_space(), "Must be using class space"); - _class_space_list = new VirtualSpaceList(rs); - _chunk_manager_class = new ChunkManager(true/*is_class*/); + _class_space_list = new VirtualSpaceList("class space list", rs); + _chunk_manager_class = new ChunkManager("class space chunk manager", _class_space_list); - if (!_class_space_list->initialization_succeeded()) { - vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); - } } #endif @@ -1200,7 +543,12 @@ } _commit_alignment = page_size; - _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); + + // Reserve alignment: all Metaspace memory mappings are to be aligned to the size of a root chunk. 
+  assert(is_aligned_to((int)MAX_CHUNK_BYTE_SIZE, os::vm_allocation_granularity()),
+         "root chunk size must be a multiple of alloc granularity");
+
+  _reserve_alignment = MAX2(page_size, (size_t)MAX_CHUNK_BYTE_SIZE);

   // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
   // override if MaxMetaspaceSize was set on the command line or not.
@@ -1246,7 +594,7 @@
 }

 void Metaspace::global_initialize() {
-  MetaspaceGC::initialize();
+  MetaspaceGC::initialize(); // TODO: is this still needed now that we no longer pre-allocate initial chunks?

 #if INCLUDE_CDS
   if (DumpSharedSpaces) {
@@ -1262,10 +610,10 @@
   if (DynamicDumpSharedSpaces && !UseSharedSpaces) {
     vm_exit_during_initialization("DynamicDumpSharedSpaces is unsupported when base CDS archive is not loaded", NULL);
   }
-
-  if (!DumpSharedSpaces && !UseSharedSpaces)
 #endif // INCLUDE_CDS
-  {
+
+  // Initialize class space:
+  if (CDS_ONLY(!DumpSharedSpaces && !UseSharedSpaces) NOT_CDS(true)) {
 #ifdef _LP64
     if (using_class_space()) {
       char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
@@ -1274,27 +622,9 @@
 #endif // _LP64
   }

-  // Initialize these before initializing the VirtualSpaceList
-  _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
-  _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
-  // Make the first class chunk bigger than a medium chunk so it's not put
-  // on the medium chunk list. The next chunk will be small and progress
-  // from there. This size calculated by -version.
-  _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
-                                      (CompressedClassSpaceSize/BytesPerWord)*2);
-  _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
-  // Arbitrarily set the initial virtual space to a multiple
-  // of the boot class loader size.
-  size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
-  word_size = align_up(word_size, Metaspace::reserve_alignment_words());
-
-  // Initialize the list of virtual spaces.
- _space_list = new VirtualSpaceList(word_size); - _chunk_manager_metadata = new ChunkManager(false/*metaspace*/); - - if (!_space_list->initialization_succeeded()) { - vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL); - } + // Initialize non-class virtual space list, and its chunk manager: + _space_list = new VirtualSpaceList("Non-Class VirtualSpaceList"); + _chunk_manager_metadata = new ChunkManager("Non-Class ChunkManager", _space_list); _tracer = new MetaspaceTracer(); @@ -1316,10 +646,6 @@ } } -size_t Metaspace::align_word_size_up(size_t word_size) { - size_t byte_size = word_size * wordSize; - return ReservedSpace::allocation_align_size_up(byte_size) / wordSize; -} MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, TRAPS) { @@ -1460,174 +786,6 @@ return get_space_list(NonClassType)->contains(ptr); } -// ClassLoaderMetaspace - -ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type) - : _space_type(type) - , _lock(lock) - , _vsm(NULL) - , _class_vsm(NULL) -{ - initialize(lock, type); -} - -ClassLoaderMetaspace::~ClassLoaderMetaspace() { - Metaspace::assert_not_frozen(); - DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths)); - delete _vsm; - if (Metaspace::using_class_space()) { - delete _class_vsm; - } -} - -void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) { - Metachunk* chunk = get_initialization_chunk(type, mdtype); - if (chunk != NULL) { - // Add to this manager's list of chunks in use and make it the current_chunk(). - get_space_manager(mdtype)->add_chunk(chunk, true); - } -} - -Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) { - size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type); - - // Get a chunk from the chunk freelist - Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size); - - if (chunk == NULL) { - chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size, - get_space_manager(mdtype)->medium_chunk_bunch()); - } - - return chunk; -} - -void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) { - Metaspace::verify_global_initialization(); - - DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births)); - - // Allocate SpaceManager for metadata objects. - _vsm = new SpaceManager(Metaspace::NonClassType, type, lock); - - if (Metaspace::using_class_space()) { - // Allocate SpaceManager for classes. - _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock); - } - - MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag); - - // Allocate chunk for metadata objects - initialize_first_chunk(type, Metaspace::NonClassType); - - // Allocate chunk for class metadata objects - if (Metaspace::using_class_space()) { - initialize_first_chunk(type, Metaspace::ClassType); - } -} - -MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) { - Metaspace::assert_not_frozen(); - - DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs)); - - // Don't use class_vsm() unless UseCompressedClassPointers is true. 
- if (Metaspace::is_class_space_allocation(mdtype)) { - return class_vsm()->allocate(word_size); - } else { - return vsm()->allocate(word_size); - } -} - -MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) { - Metaspace::assert_not_frozen(); - size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); - assert(delta_bytes > 0, "Must be"); - - size_t before = 0; - size_t after = 0; - bool can_retry = true; - MetaWord* res; - bool incremented; - - // Each thread increments the HWM at most once. Even if the thread fails to increment - // the HWM, an allocation is still attempted. This is because another thread must then - // have incremented the HWM and therefore the allocation might still succeed. - do { - incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry); - res = allocate(word_size, mdtype); - } while (!incremented && res == NULL && can_retry); - - if (incremented) { - Metaspace::tracer()->report_gc_threshold(before, after, - MetaspaceGCThresholdUpdater::ExpandAndAllocate); - log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after); - } - - return res; -} - -size_t ClassLoaderMetaspace::allocated_blocks_bytes() const { - return (vsm()->used_words() + - (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord; -} - -size_t ClassLoaderMetaspace::allocated_chunks_bytes() const { - return (vsm()->capacity_words() + - (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord; -} - -void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { - Metaspace::assert_not_frozen(); - assert(!SafepointSynchronize::is_at_safepoint() - || Thread::current()->is_VM_thread(), "should be the VM thread"); - - DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs)); - - MutexLocker ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); - - if (is_class && Metaspace::using_class_space()) { - class_vsm()->deallocate(ptr, word_size); - } else { - vsm()->deallocate(ptr, word_size); - } -} - -size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) { - assert(Metaspace::using_class_space(), "Has to use class space"); - return class_vsm()->calc_chunk_size(word_size); -} - -void ClassLoaderMetaspace::print_on(outputStream* out) const { - // Print both class virtual space counts and metaspace. 
- if (Verbose) { - vsm()->print_on(out); - if (Metaspace::using_class_space()) { - class_vsm()->print_on(out); - } - } -} - -void ClassLoaderMetaspace::verify() { - vsm()->verify(); - if (Metaspace::using_class_space()) { - class_vsm()->verify(); - } -} - -void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const { - assert_lock_strong(lock()); - vsm()->add_to_statistics_locked(&out->nonclass_sm_stats()); - if (Metaspace::using_class_space()) { - class_vsm()->add_to_statistics_locked(&out->class_sm_stats()); - } -} - -void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const { - MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag); - add_to_statistics_locked(out); -} - /////////////// Unit tests /////////////// struct chunkmanager_statistics_t { --- old/src/hotspot/share/memory/metaspace.hpp 2019-07-22 11:08:02.353657619 +0200 +++ new/src/hotspot/share/memory/metaspace.hpp 2019-07-22 11:08:02.137655305 +0200 @@ -110,22 +110,11 @@ private: - // Align up the word size to the allocation word size - static size_t align_word_size_up(size_t); - // Aligned size of the metaspace. static size_t _compressed_class_space_size; - static size_t compressed_class_space_size() { - return _compressed_class_space_size; - } - - static void set_compressed_class_space_size(size_t size) { - _compressed_class_space_size = size; - } - - static size_t _first_chunk_word_size; - static size_t _first_class_chunk_word_size; + static size_t compressed_class_space_size() { return _compressed_class_space_size; } + static void set_compressed_class_space_size(size_t size) { _compressed_class_space_size = size; } static size_t _commit_alignment; static size_t _reserve_alignment; @@ -142,14 +131,17 @@ static bool _initialized; - public: static metaspace::VirtualSpaceList* space_list() { return _space_list; } static metaspace::VirtualSpaceList* class_space_list() { return _class_space_list; } + static metaspace::VirtualSpaceList* get_space_list(MetadataType mdtype) { assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype"); return mdtype == ClassType ? class_space_list() : space_list(); } +public: + + static metaspace::ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; } static metaspace::ChunkManager* chunk_manager_class() { return _chunk_manager_class; } static metaspace::ChunkManager* get_chunk_manager(MetadataType mdtype) { @@ -193,13 +185,13 @@ static void verify_global_initialization(); - static size_t first_chunk_word_size() { return _first_chunk_word_size; } - static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; } - + // The alignment at which Metaspace mappings are reserved. static size_t reserve_alignment() { return _reserve_alignment; } static size_t reserve_alignment_words() { return _reserve_alignment / BytesPerWord; } + + // The granularity at which Metaspace is committed and uncommitted. 
static size_t commit_alignment() { return _commit_alignment; } - static size_t commit_alignment_words() { return _commit_alignment / BytesPerWord; } + static size_t commit_words() { return _commit_alignment / BytesPerWord; } static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, TRAPS); @@ -209,7 +201,6 @@ // Free empty virtualspaces static void purge(MetadataType mdtype); - static void purge(); static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS); @@ -231,73 +222,6 @@ }; -// Manages the metaspace portion belonging to a class loader -class ClassLoaderMetaspace : public CHeapObj { - friend class CollectedHeap; // For expand_and_allocate() - friend class ZCollectedHeap; // For expand_and_allocate() - friend class ShenandoahHeap; // For expand_and_allocate() - friend class Metaspace; - friend class MetaspaceUtils; - friend class metaspace::PrintCLDMetaspaceInfoClosure; - friend class VM_CollectForMetadataAllocation; // For expand_and_allocate() - - private: - - void initialize(Mutex* lock, Metaspace::MetaspaceType type); - - // Initialize the first chunk for a Metaspace. Used for - // special cases such as the boot class loader, reflection - // class loader and anonymous class loader. - void initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype); - metaspace::Metachunk* get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype); - - const Metaspace::MetaspaceType _space_type; - Mutex* const _lock; - metaspace::SpaceManager* _vsm; - metaspace::SpaceManager* _class_vsm; - - metaspace::SpaceManager* vsm() const { return _vsm; } - metaspace::SpaceManager* class_vsm() const { return _class_vsm; } - metaspace::SpaceManager* get_space_manager(Metaspace::MetadataType mdtype) { - assert(mdtype != Metaspace::MetadataTypeCount, "MetadaTypeCount can't be used as mdtype"); - return mdtype == Metaspace::ClassType ? class_vsm() : vsm(); - } - - Mutex* lock() const { return _lock; } - - MetaWord* expand_and_allocate(size_t size, Metaspace::MetadataType mdtype); - - size_t class_chunk_size(size_t word_size); - - // Adds to the given statistic object. Must be locked with CLD metaspace lock. - void add_to_statistics_locked(metaspace::ClassLoaderMetaspaceStatistics* out) const; - - Metaspace::MetaspaceType space_type() const { return _space_type; } - - public: - - ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type); - ~ClassLoaderMetaspace(); - - // Allocate space for metadata of type mdtype. This is space - // within a Metachunk and is used by - // allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS) - MetaWord* allocate(size_t word_size, Metaspace::MetadataType mdtype); - - size_t allocated_blocks_bytes() const; - size_t allocated_chunks_bytes() const; - - void deallocate(MetaWord* ptr, size_t byte_size, bool is_class); - - void print_on(outputStream* st) const; - // Debugging support - void verify(); - - // Adds to the given statistic object. Will lock with CLD metaspace lock. - void add_to_statistics(metaspace::ClassLoaderMetaspaceStatistics* out) const; - -}; // ClassLoaderMetaspace - class MetaspaceUtils : AllStatic { // Spacemanager updates running counters. @@ -331,8 +255,6 @@ static size_t free_chunks_total_words(Metaspace::MetadataType mdtype); - // Helper for print_xx_report. 
- static void print_vs(outputStream* out, size_t scale); public: @@ -386,32 +308,6 @@ static size_t min_chunk_size_words(); - // Flags for print_report(). - enum ReportFlag { - // Show usage by class loader. - rf_show_loaders = (1 << 0), - // Breaks report down by chunk type (small, medium, ...). - rf_break_down_by_chunktype = (1 << 1), - // Breaks report down by space type (anonymous, reflection, ...). - rf_break_down_by_spacetype = (1 << 2), - // Print details about the underlying virtual spaces. - rf_show_vslist = (1 << 3), - // Print metaspace map. - rf_show_vsmap = (1 << 4), - // If show_loaders: show loaded classes for each loader. - rf_show_classes = (1 << 5) - }; - - // This will print out a basic metaspace usage report but - // unlike print_report() is guaranteed not to lock or to walk the CLDG. - static void print_basic_report(outputStream* st, size_t scale); - - // Prints a report about the current metaspace state. - // Optional parts can be enabled via flags. - // Function will walk the CLDG and will lock the expand lock; if that is not - // convenient, use print_basic_report() instead. - static void print_report(outputStream* out, size_t scale = 0, int flags = 0); - static bool has_chunk_free_list(Metaspace::MetadataType mdtype); static MetaspaceChunkFreeListSummary chunk_free_list_summary(Metaspace::MetadataType mdtype); @@ -428,56 +324,4 @@ static void verify_metrics(); }; -// Metaspace are deallocated when their class loader are GC'ed. -// This class implements a policy for inducing GC's to recover -// Metaspaces. - -class MetaspaceGC : AllStatic { - - // The current high-water-mark for inducing a GC. - // When committed memory of all metaspaces reaches this value, - // a GC is induced and the value is increased. Size is in bytes. - static volatile size_t _capacity_until_GC; - - // For a CMS collection, signal that a concurrent collection should - // be started. - static bool _should_concurrent_collect; - - static uint _shrink_factor; - - static size_t shrink_factor() { return _shrink_factor; } - void set_shrink_factor(uint v) { _shrink_factor = v; } - - public: - - static void initialize(); - static void post_initialize(); - - static size_t capacity_until_GC(); - static bool inc_capacity_until_GC(size_t v, - size_t* new_cap_until_GC = NULL, - size_t* old_cap_until_GC = NULL, - bool* can_retry = NULL); - static size_t dec_capacity_until_GC(size_t v); - - static bool should_concurrent_collect() { return _should_concurrent_collect; } - static void set_should_concurrent_collect(bool v) { - _should_concurrent_collect = v; - } - - // The amount to increase the high-water-mark (_capacity_until_GC) - static size_t delta_capacity_until_GC(size_t bytes); - - // Tells if we have can expand metaspace without hitting set limits. - static bool can_expand(size_t words, bool is_class); - - // Returns amount that we can expand without hitting a GC, - // measured in words. - static size_t allowed_expansion(); - - // Calculate the new high-water mark at which to induce - // a GC. 
- static void compute_new_size(); -}; - #endif // SHARE_MEMORY_METASPACE_HPP --- old/src/hotspot/share/memory/metaspace/chunkManager.cpp 2019-07-22 11:08:02.849662933 +0200 +++ new/src/hotspot/share/memory/metaspace/chunkManager.cpp 2019-07-22 11:08:02.637660661 +0200 @@ -23,617 +23,230 @@ */ #include "precompiled.hpp" + #include "logging/log.hpp" -#include "logging/logStream.hpp" -#include "memory/binaryTreeDictionary.inline.hpp" -#include "memory/freeList.inline.hpp" +#include "memory/metaspace/constants.hpp" +#include "memory/metaspace/chunkAllocSequence.hpp" +#include "memory/metaspace/chunkLevel.hpp" #include "memory/metaspace/chunkManager.hpp" #include "memory/metaspace/metachunk.hpp" -#include "memory/metaspace/metaDebug.hpp" -#include "memory/metaspace/metaspaceCommon.hpp" -#include "memory/metaspace/metaspaceStatistics.hpp" -#include "memory/metaspace/occupancyMap.hpp" #include "memory/metaspace/virtualSpaceNode.hpp" +#include "memory/metaspace/virtualSpaceList.hpp" #include "runtime/mutexLocker.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" -#include "utilities/ostream.hpp" namespace metaspace { -ChunkManager::ChunkManager(bool is_class) - : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) { - _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class)); - _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class)); - _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class)); -} - -void ChunkManager::remove_chunk(Metachunk* chunk) { - size_t word_size = chunk->word_size(); - ChunkIndex index = list_index(word_size); - if (index != HumongousIndex) { - free_chunks(index)->remove_chunk(chunk); - } else { - humongous_dictionary()->remove_chunk(chunk); - } - - // Chunk has been removed from the chunks free list, update counters. - account_for_removed_chunk(chunk); -} - -bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) { - assert_lock_strong(MetaspaceExpand_lock); - assert(chunk != NULL, "invalid chunk pointer"); - // Check for valid merge combinations. - assert((chunk->get_chunk_type() == SpecializedIndex && - (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) || - (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex), - "Invalid chunk merge combination."); - - const size_t target_chunk_word_size = - get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class()); - - // [ prospective merge region ) - MetaWord* const p_merge_region_start = - (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord)); - MetaWord* const p_merge_region_end = - p_merge_region_start + target_chunk_word_size; - - // We need the VirtualSpaceNode containing this chunk and its occupancy map. - VirtualSpaceNode* const vsn = chunk->container(); - OccupancyMap* const ocmap = vsn->occupancy_map(); - - // The prospective chunk merge range must be completely contained by the - // committed range of the virtual space node. - if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) { - return false; - } - - // Only attempt to merge this range if at its start a chunk starts and at its end - // a chunk ends. If a chunk (can only be humongous) straddles either start or end - // of that range, we cannot merge. 
- if (!ocmap->chunk_starts_at_address(p_merge_region_start)) { - return false; - } - if (p_merge_region_end < vsn->top() && - !ocmap->chunk_starts_at_address(p_merge_region_end)) { - return false; - } - - // Now check if the prospective merge area contains live chunks. If it does we cannot merge. - if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) { - return false; - } - - // Success! Remove all chunks in this region... - log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...", - (is_class() ? "class space" : "metaspace"), - p_merge_region_start, p_merge_region_end); - - const int num_chunks_removed = - remove_chunks_in_area(p_merge_region_start, target_chunk_word_size); - - // ... and create a single new bigger chunk. - Metachunk* const p_new_chunk = - ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn); - assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity"); - p_new_chunk->set_origin(origin_merge); - - log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".", - (is_class() ? "class space" : "metaspace"), - p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord)); - - // Fix occupancy map: remove old start bits of the small chunks and set new start bit. - ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size); - ocmap->set_chunk_starts_at_address(p_merge_region_start, true); - - // Mark chunk as free. Note: it is not necessary to update the occupancy - // map in-use map, because the old chunks were also free, so nothing - // should have changed. - p_new_chunk->set_is_tagged_free(true); - - // Add new chunk to its freelist. - ChunkList* const list = free_chunks(target_chunk_type); - list->return_chunk_at_head(p_new_chunk); - - // And adjust ChunkManager:: _free_chunks_count (_free_chunks_total - // should not have changed, because the size of the space should be the same) - _free_chunks_count -= num_chunks_removed; - _free_chunks_count ++; - - // VirtualSpaceNode::chunk_count does not have to be modified: - // it means "number of active (non-free) chunks", so merging free chunks - // should not affect that count. - - // At the end of a chunk merge, run verification tests. -#ifdef ASSERT - - EVERY_NTH(VerifyMetaspaceInterval) - locked_verify(true); - vsn->verify(true); - END_EVERY_NTH - g_internal_statistics.num_chunk_merges ++; - -#endif - - return true; -} - -// Remove all chunks in the given area - the chunks are supposed to be free - -// from their corresponding freelists. Mark them as invalid. -// - This does not correct the occupancy map. -// - This does not adjust the counters in ChunkManager. -// - Does not adjust container count counter in containing VirtualSpaceNode -// Returns number of chunks removed. 
-int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
-  assert(p != NULL && word_size > 0, "Invalid range.");
-  const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
-  assert_is_aligned(word_size, smallest_chunk_size);
-
-  Metachunk* const start = (Metachunk*) p;
-  const Metachunk* const end = (Metachunk*)(p + word_size);
-  Metachunk* cur = start;
-  int num_removed = 0;
-  while (cur < end) {
-    Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
-    DEBUG_ONLY(do_verify_chunk(cur));
-    assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
-    assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
-    log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
-      (is_class() ? "class space" : "metaspace"),
-      cur, cur->word_size() * sizeof(MetaWord));
-    cur->remove_sentinel();
-    // Note: cannot call ChunkManager::remove_chunk, because that
-    // modifies the counters in ChunkManager, which we do not want. So
-    // we call remove_chunk on the freelist directly (see also the
-    // splitting function which does the same).
-    ChunkList* const list = free_chunks(list_index(cur->word_size()));
-    list->remove_chunk(cur);
-    num_removed ++;
-    cur = next;
+// Return a single chunk to the freelist and adjust accounting. No merge is attempted.
+void ChunkManager::return_chunk_simple(Metachunk* c) {
+  DEBUG_ONLY(c->verify(false));
+  const chklvl_t lvl = c->level();
+  _chunks[lvl].add(c);
+  _total_word_size.increment_by(c->word_size());
+}
+
+// Remove the given chunk from its free list and adjust accounting.
+void ChunkManager::remove_chunk(Metachunk* c) {
+  DEBUG_ONLY(c->verify(false));
+  const chklvl_t lvl = c->level();
+  _chunks[lvl].remove(c);
+  _total_word_size.decrement_by(c->word_size());
+}
+
+// Creates a chunk manager with a given name (which is used for debug purposes only)
+// and an associated virtual space list from which new chunks are requested
+// (see get_chunk()).
+ChunkManager::ChunkManager(const char* name, VirtualSpaceList* space_list)
+  : _vslist(space_list),
+    _name(name)
+{
+  for (int i = 0; i < chklvl::NUM_CHUNK_LEVELS; i ++) {
+    _chunks[i] = NULL;
   }
-  return num_removed;
 }

-// Update internal accounting after a chunk was added
-void ChunkManager::account_for_added_chunk(const Metachunk* c) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  _free_chunks_count ++;
-  _free_chunks_total += c->word_size();
-}
-
-// Update internal accounting after a chunk was removed
-void ChunkManager::account_for_removed_chunk(const Metachunk* c) {
-  assert_lock_strong(MetaspaceExpand_lock);
-  assert(_free_chunks_count >= 1,
-    "ChunkManager::_free_chunks_count: about to go negative (" SIZE_FORMAT ").", _free_chunks_count);
-  assert(_free_chunks_total >= c->word_size(),
-    "ChunkManager::_free_chunks_total: about to go negative"
-    "(now: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").", _free_chunks_total, c->word_size());
-  _free_chunks_count --;
-  _free_chunks_total -= c->word_size();
-}
-
-ChunkIndex ChunkManager::list_index(size_t size) {
-  return get_chunk_type_by_size(size, is_class());
-}
-
-size_t ChunkManager::size_by_index(ChunkIndex index) const {
-  index_bounds_check(index);
-  assert(index != HumongousIndex, "Do not call for humongous chunks.");
-  return get_size_for_nonhumongous_chunktype(index, is_class());
+// Given a chunk we are about to hand out to the caller, make sure it is committed
+// according to
constants::committed_words_on_fresh_chunks +bool ChunkManager::commit_chunk_before_handout(Metachunk* c) { + const size_t must_be_committed = MIN2(c->word_size(), constants::committed_words_on_fresh_chunks); + return c->ensure_committed(must_be_committed); } #ifdef ASSERT -void ChunkManager::verify(bool slow) const { - MutexLocker cl(MetaspaceExpand_lock, - Mutex::_no_safepoint_check_flag); - locked_verify(slow); -} - -void ChunkManager::locked_verify(bool slow) const { - log_trace(gc, metaspace, freelist)("verifying %s chunkmanager (%s).", - (is_class() ? "class space" : "metaspace"), (slow ? "slow" : "quick")); - - assert_lock_strong(MetaspaceExpand_lock); - - size_t chunks_counted = 0; - size_t wordsize_chunks_counted = 0; - for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { - const ChunkList* list = _free_chunks + i; - if (list != NULL) { - Metachunk* chunk = list->head(); - while (chunk) { - if (slow) { - do_verify_chunk(chunk); - } - assert(chunk->is_tagged_free(), "Chunk should be tagged as free."); - chunks_counted ++; - wordsize_chunks_counted += chunk->size(); - chunk = chunk->next(); - } +// Given a splinters array returned from a split operation, check that it meets expectations +static void check_splinters_array(Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS], chklvl_t min, chklvl_t max) { + // The array shall contain splinters in the range [min, max] and nothing outside. The chunk levels for + // the chunks must match too. + for (chklvl_t l = chklvl::ROOT_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) { + if (l >= min && l < max) { + assert(splinters[l] != NULL, "Missing splinters"); + assert(splinters[l]->level() == l, "Unexpected level"); + splinters[l]->verify(false); + } else { + assert(splinters[l] == NULL, "Unexpected splinters"); } } - - chunks_counted += humongous_dictionary()->total_free_blocks(); - wordsize_chunks_counted += humongous_dictionary()->total_size(); - - assert(chunks_counted == _free_chunks_count && wordsize_chunks_counted == _free_chunks_total, - "freelist accounting mismatch: " - "we think: " SIZE_FORMAT " chunks, total " SIZE_FORMAT " words, " - "reality: " SIZE_FORMAT " chunks, total " SIZE_FORMAT " words.", - _free_chunks_count, _free_chunks_total, - chunks_counted, wordsize_chunks_counted); -} -#endif // ASSERT - -void ChunkManager::locked_print_free_chunks(outputStream* st) { - assert_lock_strong(MetaspaceExpand_lock); - st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, - _free_chunks_total, _free_chunks_count); -} - -ChunkList* ChunkManager::free_chunks(ChunkIndex index) { - assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex, - "Bad index: %d", (int)index); - return &_free_chunks[index]; } +#endif -ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) { - ChunkIndex index = list_index(word_size); - assert(index < HumongousIndex, "No humongous list"); - return free_chunks(index); -} -// Helper for chunk splitting: given a target chunk size and a larger free chunk, -// split up the larger chunk into n smaller chunks, at least one of which should be -// the target chunk of target chunk size. The smaller chunks, including the target -// chunk, are returned to the freelist. The pointer to the target chunk is returned. -// Note that this chunk is supposed to be removed from the freelist right away. 
-Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) { - assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity"); - - const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type(); - const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class()); - - MetaWord* const region_start = (MetaWord*)larger_chunk; - const size_t region_word_len = larger_chunk->word_size(); - MetaWord* const region_end = region_start + region_word_len; - VirtualSpaceNode* const vsn = larger_chunk->container(); - OccupancyMap* const ocmap = vsn->occupancy_map(); - - // Any larger non-humongous chunk size is a multiple of any smaller chunk size. - // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start - // at an address suitable to place the smaller target chunk. - assert_is_aligned(region_start, target_chunk_word_size); - - // Remove old chunk. - free_chunks(larger_chunk_index)->remove_chunk(larger_chunk); - larger_chunk->remove_sentinel(); - - // Prevent access to the old chunk from here on. - larger_chunk = NULL; - // ... and wipe it. - DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord)); - - // In its place create first the target chunk... - MetaWord* p = region_start; - Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn); - assert(target_chunk == (Metachunk*)p, "Sanity"); - target_chunk->set_origin(origin_split); - - // Note: we do not need to mark its start in the occupancy map - // because it coincides with the old chunk start. - - // Mark chunk as free and return to the freelist. - do_update_in_use_info_for_chunk(target_chunk, false); - free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk); - - // This chunk should now be valid and can be verified. - DEBUG_ONLY(do_verify_chunk(target_chunk)); - - // In the remaining space create the remainder chunks. - p += target_chunk->word_size(); - assert(p < region_end, "Sanity"); - - while (p < region_end) { - - // Find the largest chunk size which fits the alignment requirements at address p. - ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index); - size_t this_chunk_word_size = 0; - for(;;) { - this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class()); - if (is_aligned(p, this_chunk_word_size * BytesPerWord)) { +// Given a chunk which must be outside of a freelist and must be free, split it to +// meet a target level and return it. Splinters are added to the freelist. +Metachunk* ChunkManager::split_chunk_and_add_splinters(Metachunk* c, chklvl_t target_level) { + + assert(c->is_free() && c->level() > target_level, "Invalid chunk for splitting"); + DEBUG_ONLY(chklvl::check_valid_level(target_level);) + + const chklvl_t orig_level = c->level(); + Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS] = { 0 }; + c = c->vsnode()->split(target_level, c, splinters); + + // Splitting should never fail. + assert(c != NULL, "Split failed"); + assert(c->level() == target_level, "Sanity"); + DEBUG_ONLY(c->verify(false)); + DEBUG_ONLY(check_splinters_array(splinters, orig_level + 1, target_level);) + + // Return splinters to freelist. + for (chklvl_t l = orig_level + 1; l <= target_level; l ++) { + return_chunk_simple(splinters[l]); + } + + return c; +} + +// Get a chunk and be smart about it. 
+// - 1) Attempt to find a free chunk of exactly the pref_level level
+// - 2) Failing that, attempt to find a smaller chunk, down to (and including) the minimal level.
+// - 3) Failing that, attempt to find a free chunk of larger size and split it.
+// - 4) Failing that, attempt to allocate a new chunk from the connected virtual space.
+// - Failing that, give up and return NULL.
+// Note: this is not guaranteed to return a *committed* chunk. The chunk manager will
+// attempt to commit the returned chunk according to constants::committed_words_on_fresh_chunks;
+// but this may fail if we hit a commit limit. In that case, a partly uncommitted chunk
+// will be returned, and the commit is attempted again when we allocate from the chunk's
+// uncommitted area. See also Metachunk::allocate.
+Metachunk* ChunkManager::get_chunk(chklvl_t min_level, chklvl_t pref_level) {
+
+  assert_lock_strong(MetaspaceExpand_lock);
+  DEBUG_ONLY(chklvl::check_valid_level(min_level);)
+  DEBUG_ONLY(chklvl::check_valid_level(pref_level);)
+
+  Metachunk* c = NULL;
+
+  // 1) Attempt to find a free chunk of exactly the pref_level level
+  c = _chunks[pref_level].first();
+  if (c != NULL) {
+    remove_chunk(c);
+  }
+
+  // 2) Failing that, attempt to find a smaller chunk, down to (and including) the minimal level.
+  if (c == NULL) {
+    for (chklvl_t lvl = pref_level + 1; lvl <= min_level; lvl ++) {
+      c = _chunks[lvl].first();
+      if (c != NULL) {
+        remove_chunk(c);
         break;
-      } else {
-        this_chunk_index = prev_chunk_index(this_chunk_index);
-        assert(this_chunk_index >= target_chunk_index, "Sanity");
       }
     }
-
-    assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
-    assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
-    assert(p + this_chunk_word_size <= region_end, "Sanity");
-
-    // Create splitting chunk.
-    Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
-    assert(this_chunk == (Metachunk*)p, "Sanity");
-    this_chunk->set_origin(origin_split);
-    ocmap->set_chunk_starts_at_address(p, true);
-    do_update_in_use_info_for_chunk(this_chunk, false);
-
-    // This chunk should be valid and can be verified.
-    DEBUG_ONLY(do_verify_chunk(this_chunk));
-
-    // Return this chunk to freelist and correct counter.
-    free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
-    _free_chunks_count ++;
-
-    log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
-      SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
-      p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
-      p2i(region_start), p2i(region_end));
-
-    p += this_chunk_word_size;
-  }
-  // Note: at this point, the VirtualSpaceNode is invalid since we split a chunk and
-  // did not yet hand out part of that split; so, vsn->verify_free_chunks_are_ideally_merged()
-  // would assert. Instead, do all verifications in the caller.
-
-  DEBUG_ONLY(g_internal_statistics.num_chunk_splits ++);
-
-  return target_chunk;
-}
-
-Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
-  assert_lock_strong(MetaspaceExpand_lock);
-
-  Metachunk* chunk = NULL;
-  bool we_did_split_a_chunk = false;
-
-  if (list_index(word_size) != HumongousIndex) {
-
-    ChunkList* free_list = find_free_chunks_list(word_size);
-    assert(free_list != NULL, "Sanity check");
-
-    chunk = free_list->head();
-
-    if (chunk == NULL) {
-      // Split large chunks into smaller chunks if there are no smaller chunks, just large chunks.
-      // This is the counterpart of the coalescing-upon-chunk-return.
-
-      ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
-
-      // Is there a larger chunk we could split?
-      Metachunk* larger_chunk = NULL;
-      ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
-      while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
-        larger_chunk = free_chunks(larger_chunk_index)->head();
-        if (larger_chunk == NULL) {
-          larger_chunk_index = next_chunk_index(larger_chunk_index);
-        }
-      }
+  // 3) Failing that, attempt to find a free chunk of larger size and split it.
+  if (c == NULL) {
+    for (chklvl_t lvl = pref_level - 1; lvl >= chklvl::ROOT_CHUNK_LEVEL; lvl --) {
+      c = _chunks[lvl].first();
+      if (c != NULL) {
-      if (larger_chunk != NULL) {
-        assert(larger_chunk->word_size() > word_size, "Sanity");
-        assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
-
-        // We found a larger chunk. Lets split it up:
-        // - remove old chunk
-        // - in its place, create new smaller chunks, with at least one chunk
-        //   being of target size, the others sized as large as possible. This
-        //   is to make sure the resulting chunks are "as coalesced as possible"
-        //   (similar to VirtualSpaceNode::retire()).
-        // Note: during this operation both ChunkManager and VirtualSpaceNode
-        //       are temporarily invalid, so be careful with asserts.
-
-        log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
-          ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
-          (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
-          chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
-
-        chunk = split_chunk(word_size, larger_chunk);
-
-        // This should have worked.
-        assert(chunk != NULL, "Sanity");
-        assert(chunk->word_size() == word_size, "Sanity");
-        assert(chunk->is_tagged_free(), "Sanity");
+        // Remove chunk before splitting.
+        remove_chunk(c);
-        we_did_split_a_chunk = true;
+        // Split chunk; add splinters to freelist
+        c = split_chunk_and_add_splinters(c, pref_level);
+        break;
       }
     }
+  }
-    if (chunk == NULL) {
-      return NULL;
-    }
-
-    // Remove the chunk as the head of the list.
-    free_list->remove_chunk(chunk);
+  // 4) Failing that, attempt to allocate a new chunk from the connected virtual space.
+  if (c == NULL) {
-    log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
-                                       p2i(free_list), free_list->count());
+    c = _vslist->allocate_root_chunk();
-  } else {
-    chunk = humongous_dictionary()->get_chunk(word_size);
+    // This should always work. Note that getting the root chunk may not mean we committed memory.
+    assert(c != NULL, "Unexpected");
-    if (chunk == NULL) {
-      return NULL;
-    }
+    // Split this root chunk to the desired chunk size.
+    c = split_chunk_and_add_splinters(c, pref_level);
-    log_trace(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
-                                    chunk->word_size(), word_size, chunk->word_size() - word_size);
   }
-  // Chunk has been removed from the chunk manager; update counters.
-  account_for_removed_chunk(chunk);
-  do_update_in_use_info_for_chunk(chunk, true);
-  chunk->container()->inc_container_count();
-  chunk->inc_use_count();
-
-  // Remove it from the links to this freelist
-  chunk->set_next(NULL);
-  chunk->set_prev(NULL);
+  // Note that we should at this point have a chunk; should always work.
If we hit + // a commit limit in the meantime, the chunk may still be uncommitted, but the chunk + // itself should exist now. + assert(c != NULL, "Unexpected"); - // Run some verifications (some more if we did a chunk split) -#ifdef ASSERT + // Before returning the chunk, attempt to commit it according to the handout rules. + // If that fails, we ignore the error and return the uncommitted chunk. + if (commit_chunk_before_handout(c) == false) { + log_info(gc, metaspace)("Failed to commit chunk prior to handout."); + } - EVERY_NTH(VerifyMetaspaceInterval) - // Be extra verify-y when chunk split happened. - locked_verify(true); - VirtualSpaceNode* const vsn = chunk->container(); - vsn->verify(true); - if (we_did_split_a_chunk) { - vsn->verify_free_chunks_are_ideally_merged(); - } - END_EVERY_NTH + return c; - g_internal_statistics.num_chunks_removed_from_freelist ++; +} // ChunkManager::get_chunk -#endif - return chunk; -} +// Return a single chunk to the ChunkManager and adjust accounting. May merge chunk +// with neighbors. +// Happens after a Classloader was unloaded and releases its metaspace chunks. +// !! Note: this may invalidate the chunk. Do not access the chunk after +// this function returns !! +void ChunkManager::return_chunk(Metachunk* c) { -Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { assert_lock_strong(MetaspaceExpand_lock); + DEBUG_ONLY(c->verify(false);) - // Take from the beginning of the list - Metachunk* chunk = free_chunks_get(word_size); - if (chunk == NULL) { - return NULL; - } + const chklvl_t orig_lvl = c->level(); - assert((word_size <= chunk->word_size()) || - (list_index(chunk->word_size()) == HumongousIndex), - "Non-humongous variable sized chunk"); - LogTarget(Trace, gc, metaspace, freelist) lt; - if (lt.is_enabled()) { - size_t list_count; - if (list_index(word_size) < HumongousIndex) { - ChunkList* list = find_free_chunks_list(word_size); - list_count = list->count(); - } else { - list_count = humongous_dictionary()->total_count(); - } - LogStream ls(lt); - ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ", - p2i(this), p2i(chunk), chunk->word_size(), list_count); - ResourceMark rm; - locked_print_free_chunks(&ls); - } - - return chunk; -} - -void ChunkManager::return_single_chunk(Metachunk* chunk) { + int num_merged[chklvl::NUM_CHUNK_LEVELS] = { 0 }; + Metachunk* c2 = c->vsnode()->merge(c, num_merged); -#ifdef ASSERT - EVERY_NTH(VerifyMetaspaceInterval) - this->locked_verify(false); - do_verify_chunk(chunk); - END_EVERY_NTH -#endif - - const ChunkIndex index = chunk->get_chunk_type(); - assert_lock_strong(MetaspaceExpand_lock); - DEBUG_ONLY(g_internal_statistics.num_chunks_added_to_freelist ++;) - assert(chunk != NULL, "Expected chunk."); - assert(chunk->container() != NULL, "Container should have been set."); - assert(chunk->is_tagged_free() == false, "Chunk should be in use."); - index_bounds_check(index); - - // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not - // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary - // keeps tree node pointers in the chunk payload area which mangle will overwrite. - DEBUG_ONLY(chunk->mangle(badMetaWordVal);) - - // may need node for verification later after chunk may have been merged away. - DEBUG_ONLY(VirtualSpaceNode* vsn = chunk->container(); ) - - if (index != HumongousIndex) { - // Return non-humongous chunk to freelist. 
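To show how get_chunk() and Metachunk::allocate() are meant to interact, here is a minimal, hypothetical caller; the function name and the level choice are made up for this sketch, and the real call sites (the space manager) are not part of this hunk.

  static MetaWord* allocate_from_fresh_chunk(ChunkManager* cm, chklvl_t min_level,
                                             chklvl_t pref_level, size_t requested_words) {
    Metachunk* c = NULL;
    {
      // get_chunk() asserts that the expand lock is held by the caller.
      MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
      c = cm->get_chunk(min_level, pref_level);
    }
    if (c == NULL) {
      return NULL; // Neither a free chunk nor a new root chunk could be provided.
    }
    // The chunk may be only partly committed, so the allocation itself can still fail.
    bool hit_commit_limit = false;
    MetaWord* p = c->allocate(requested_words, &hit_commit_limit);
    // p == NULL && hit_commit_limit:  a commit limit was hit -- the caller would typically
    //                                 trigger a GC or report an out-of-memory condition.
    // p == NULL && !hit_commit_limit: the chunk is too small -- the caller would retry
    //                                 with another, larger chunk.
    return p;
  }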
- ChunkList* list = free_chunks(index); - assert(list->size() == chunk->word_size(), "Wrong chunk type."); - list->return_chunk_at_head(chunk); - log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " to freelist.", - chunk_size_name(index), p2i(chunk)); - } else { - // Return humongous chunk to dictionary. - assert(chunk->word_size() > free_chunks(MediumIndex)->size(), "Wrong chunk type."); - assert(chunk->word_size() % free_chunks(SpecializedIndex)->size() == 0, - "Humongous chunk has wrong alignment."); - _humongous_dictionary.return_chunk(chunk); - log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.", - chunk_size_name(index), p2i(chunk), chunk->word_size()); - } - chunk->container()->dec_container_count(); - do_update_in_use_info_for_chunk(chunk, false); + if (c2 != NULL) { + DEBUG_ONLY(c2->verify(false)); - // Chunk has been added; update counters. - account_for_added_chunk(chunk); + // We did merge chunks and now have a bigger chunk. + assert(c2->level() < orig_lvl, "Sanity"); - // Attempt coalesce returned chunks with its neighboring chunks: - // if this chunk is small or special, attempt to coalesce to a medium chunk. - if (index == SmallIndex || index == SpecializedIndex) { - if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) { - // This did not work. But if this chunk is special, we still may form a small chunk? - if (index == SpecializedIndex) { - if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) { - // give up. - } + // Adjust counters - the merged-in chunks have been removed from the free lists, but the counters + // in this chunk manager must be adjusted too. + size_t size_chunks_removed = 0; + for (chklvl_t l = chklvl::ROOT_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) { + if (num_merged[l] > 0) { + // Since we have a binary tree, we should exactly see one merge per level. + assert(num_merged[l] == 1, "sanity"); + _chunks[l].dec_counter_by(1); + size_chunks_removed += chklvl::word_size_for_level(l); } } - } + _total_word_size.decrement_by(size_chunks_removed); - // From here on do not access chunk anymore, it may have been merged with another chunk. 
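  // Worked example for the accounting above (illustration only, assuming the binary buddy
  // layout the one-merge-per-level assertion implies): a chunk of level L is returned while
  // its level-L buddy is free, the buddy of the resulting level L-1 chunk is free as well,
  // but the level L-2 buddy is not. Then:
  //   num_merged[L]     == 1  -> the level-L counter is decremented by one,
  //   num_merged[L - 1] == 1  -> the level-(L-1) counter is decremented by one,
  //   c2 ends up at level L - 2.
  // _total_word_size drops by word_size_for_level(L) + word_size_for_level(L - 1) here, and
  // return_chunk_simple(c2) below adds back word_size_for_level(L - 2); since each level
  // doubles the size, the net effect is +word_size_for_level(L), exactly the size of the
  // chunk that was returned.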
+ c = c2; + } -#ifdef ASSERT - EVERY_NTH(VerifyMetaspaceInterval) - this->locked_verify(true); - vsn->verify(true); - vsn->verify_free_chunks_are_ideally_merged(); - END_EVERY_NTH -#endif + return_chunk_simple(c); } -void ChunkManager::return_chunk_list(Metachunk* chunks) { - if (chunks == NULL) { - return; - } - LogTarget(Trace, gc, metaspace, freelist) log; - if (log.is_enabled()) { // tracing - log.print("returning list of chunks..."); - } - unsigned num_chunks_returned = 0; - size_t size_chunks_returned = 0; - Metachunk* cur = chunks; - while (cur != NULL) { - // Capture the next link before it is changed - // by the call to return_chunk_at_head(); - Metachunk* next = cur->next(); - if (log.is_enabled()) { // tracing - num_chunks_returned ++; - size_chunks_returned += cur->word_size(); - } - return_single_chunk(cur); - cur = next; - } - if (log.is_enabled()) { // tracing - log.print("returned %u chunks to freelist, total word size " SIZE_FORMAT ".", - num_chunks_returned, size_chunks_returned); - } -} -void ChunkManager::collect_statistics(ChunkManagerStatistics* out) const { - MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag); - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { - out->chunk_stats(i).add(num_free_chunks(i), size_free_chunks_in_bytes(i) / sizeof(MetaWord)); - } +ChunkManager* ChunkManager::_chunkmanager_class = NULL; +ChunkManager* ChunkManager::_chunkmanager_nonclass = NULL; + +static void ChunkManager::initialize(ChunkManager* chunkmanager_class, ChunkManager* chunkmanager_nonclass) { + _chunkmanager_class = chunkmanager_class; + _chunkmanager_nonclass = chunkmanager_nonclass; } } // namespace metaspace --- old/src/hotspot/share/memory/metaspace/chunkManager.hpp 2019-07-22 11:08:03.345668246 +0200 +++ new/src/hotspot/share/memory/metaspace/chunkManager.hpp 2019-07-22 11:08:03.129665932 +0200 @@ -26,173 +26,110 @@ #define SHARE_MEMORY_METASPACE_CHUNKMANAGER_HPP #include "memory/allocation.hpp" -#include "memory/binaryTreeDictionary.hpp" -#include "memory/freeList.hpp" +#include "memory/metaspace/chunkLevel.hpp" +#include "memory/metaspace/counter.hpp" #include "memory/metaspace/metachunk.hpp" -#include "memory/metaspace/metaspaceStatistics.hpp" -#include "memory/metaspaceChunkFreeListSummary.hpp" -#include "utilities/globalDefinitions.hpp" - -class ChunkManagerTestAccessor; namespace metaspace { -typedef class FreeList ChunkList; -typedef BinaryTreeDictionary > ChunkTreeDictionary; +class VirtualSpaceList; + -// Manages the global free lists of chunks. +// class ChunkManager +// +// The ChunkManager has a central role. Callers request chunks from it. +// It keeps the freelists for chunks. If the freelist is exhausted it +// allocates new chunks from a connected VirtualSpaceList. +// class ChunkManager : public CHeapObj { - friend class ::ChunkManagerTestAccessor; - // Free list of chunks of different sizes. - // SpecializedChunk - // SmallChunk - // MediumChunk - ChunkList _free_chunks[NumberOfFreeLists]; - - // Whether or not this is the class chunkmanager. - const bool _is_class; - - // Return non-humongous chunk list by its index. - ChunkList* free_chunks(ChunkIndex index); - - // Returns non-humongous chunk list for the given chunk word size. - ChunkList* find_free_chunks_list(size_t word_size); - - // HumongousChunk - ChunkTreeDictionary _humongous_dictionary; - - // Returns the humongous chunk dictionary. 
- ChunkTreeDictionary* humongous_dictionary() { return &_humongous_dictionary; } - const ChunkTreeDictionary* humongous_dictionary() const { return &_humongous_dictionary; } - - // Size, in metaspace words, of all chunks managed by this ChunkManager - size_t _free_chunks_total; - // Number of chunks in this ChunkManager - size_t _free_chunks_count; - - // Update counters after a chunk was added or removed removed. - void account_for_added_chunk(const Metachunk* c); - void account_for_removed_chunk(const Metachunk* c); - - // Given a pointer to a chunk, attempts to merge it with neighboring - // free chunks to form a bigger chunk. Returns true if successful. - bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type); - - // Helper for chunk merging: - // Given an address range with 1-n chunks which are all supposed to be - // free and hence currently managed by this ChunkManager, remove them - // from this ChunkManager and mark them as invalid. - // - This does not correct the occupancy map. - // - This does not adjust the counters in ChunkManager. - // - Does not adjust container count counter in containing VirtualSpaceNode. - // Returns number of chunks removed. - int remove_chunks_in_area(MetaWord* p, size_t word_size); - - // Helper for chunk splitting: given a target chunk size and a larger free chunk, - // split up the larger chunk into n smaller chunks, at least one of which should be - // the target chunk of target chunk size. The smaller chunks, including the target - // chunk, are returned to the freelist. The pointer to the target chunk is returned. - // Note that this chunk is supposed to be removed from the freelist right away. - Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk); - - public: - - ChunkManager(bool is_class); - - // Add or delete (return) a chunk to the global freelist. - Metachunk* chunk_freelist_allocate(size_t word_size); - - // Map a size to a list index assuming that there are lists - // for special, small, medium, and humongous chunks. - ChunkIndex list_index(size_t size); - - // Map a given index to the chunk size. - size_t size_by_index(ChunkIndex index) const; - - bool is_class() const { return _is_class; } - - // Convenience accessors. - size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); } - size_t small_chunk_word_size() const { return size_by_index(SmallIndex); } - size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); } - - // Take a chunk from the ChunkManager. The chunk is expected to be in - // the chunk manager (the freelist if non-humongous, the dictionary if - // humongous). + // A chunk manager is connected to a virtual space list which is used + // to allocate new root chunks when no free chunks are found. + VirtualSpaceList* const _vslist; + + // Name + const char* const _name; + + // Freelist + MetachunkList _chunks [chklvl::NUM_CHUNK_LEVELS]; + + // Total size, in words, of all chunks combined. + SizeCounter _total_word_size; + + // Returns true if this manager contains the given chunk. Slow (walks free list) and + // only needed for verifications. + DEBUG_ONLY(bool contains_chunk(Metachunk* metachunk) const;) + + // Given a chunk we are about to handout to the caller, make sure it is committed + // according to constants::committed_words_on_fresh_chunks. + // May fail if we hit the commit limit. + static bool commit_chunk_before_handout(Metachunk* c); + + // Return a single chunk to the freelist and adjust accounting. 
No merge is attempted. + void return_chunk_simple(Metachunk* c); + + // Remove the given chunk from its free list and adjust accounting. void remove_chunk(Metachunk* chunk); - // Return a single chunk of type index to the ChunkManager. - void return_single_chunk(Metachunk* chunk); + // Given a chunk which must be outside of a freelist and must be free, split it to + // meet a target level and return it. Splinters are added to the freelist. + Metachunk* split_chunk_and_add_splinters(Metachunk* c, chklvl_t target_level); + +public: + + // Creates a chunk manager with a given name (which is for debug purposes only) + // and an associated space list which will be used to request new chunks from + // (see get_chunk()) + ChunkManager(const char* name, VirtualSpaceList* space_list); - // Add the simple linked list of chunks to the freelist of chunks - // of type index. - void return_chunk_list(Metachunk* chunk); - - // Total of the space in the free chunks list - size_t free_chunks_total_words() const { return _free_chunks_total; } - size_t free_chunks_total_bytes() const { return free_chunks_total_words() * BytesPerWord; } - - // Number of chunks in the free chunks list - size_t free_chunks_count() const { return _free_chunks_count; } - - // Remove from a list by size. Selects list based on size of chunk. - Metachunk* free_chunks_get(size_t chunk_word_size); - -#define index_bounds_check(index) \ - assert(is_valid_chunktype(index), "Bad index: %d", (int) index) - - size_t num_free_chunks(ChunkIndex index) const { - index_bounds_check(index); - - if (index == HumongousIndex) { - return _humongous_dictionary.total_free_blocks(); - } - - ssize_t count = _free_chunks[index].count(); - return count == -1 ? 0 : (size_t) count; - } - - size_t size_free_chunks_in_bytes(ChunkIndex index) const { - index_bounds_check(index); - - size_t word_size = 0; - if (index == HumongousIndex) { - word_size = _humongous_dictionary.total_size(); - } else { - const size_t size_per_chunk_in_words = _free_chunks[index].size(); - word_size = size_per_chunk_in_words * num_free_chunks(index); - } - - return word_size * BytesPerWord; - } - - MetaspaceChunkFreeListSummary chunk_free_list_summary() const { - return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex), - num_free_chunks(SmallIndex), - num_free_chunks(MediumIndex), - num_free_chunks(HumongousIndex), - size_free_chunks_in_bytes(SpecializedIndex), - size_free_chunks_in_bytes(SmallIndex), - size_free_chunks_in_bytes(MediumIndex), - size_free_chunks_in_bytes(HumongousIndex)); - } - -#ifdef ASSERT - // Debug support - // Verify free list integrity. slow=true: verify chunk-internal integrity too. - void verify(bool slow) const; - void locked_verify(bool slow) const; -#endif + // Get a chunk and be smart about it. + // - 1) Attempt to find a free chunk of exactly the pref_level level + // - 2) Failing that, attempt to find a chunk smaller or equal the minimal level. + // - 3) Failing that, attempt to find a free chunk of larger size and split it. + // - 4) Failing that, attempt to allocate a new chunk from the connected virtual space. + // - Failing that, give up and return NULL. + // Note: this is not guaranteed to return a *committed* chunk. The chunk manager will + // attempt to commit the returned chunk according to constants::committed_words_on_fresh_chunks; + // but this may fail if we hit a commit limit. 
In that case, a partly uncommitted chunk + // will be returned, and the commit is attempted again when we allocate from the chunk's + // uncommitted area. See also Metachunk::allocate. + Metachunk* get_chunk(chklvl_t min_level, chklvl_t pref_level); - void locked_print_free_chunks(outputStream* st); + // Return a single chunk to the ChunkManager and adjust accounting. May merge chunk + // with neighbors. + // Happens after a Classloader was unloaded and releases its metaspace chunks. + // !! Note: this may invalidate the chunk. Do not access the chunk after + // this function returns !! + void return_chunk(Metachunk* c); + + // Uncommit payload area of free chunks. Will be called during Metaspace GC. + void uncommit_free_chunks(); + + // Run verifications. slow=true: verify chunk-internal integrity too. + DEBUG_ONLY(void verify(bool slow) const;) + + // Returns the name of this chunk manager. + const char* name() const { return _name; } + + // Returns number of words in all free chunks. + size_t total_word_size() const { return _total_word_size.get(); } + + +private: + + static ChunkManager* _chunkmanager_class; + static ChunkManager* _chunkmanager_nonclass; + +public: + + static ChunkManager* chunkmanager_class() { return _chunkmanager_class; } + static ChunkManager* chunkmanager_nonclass() { return _chunkmanager_nonclass; } + + static void initialize(ChunkManager* chunkmanager_class, ChunkManager* chunkmanager_nonclass); - // Fill in current statistic values to the given statistics object. - void collect_statistics(ChunkManagerStatistics* out) const; }; } // namespace metaspace - #endif // SHARE_MEMORY_METASPACE_CHUNKMANAGER_HPP --- old/src/hotspot/share/memory/metaspace/metachunk.cpp 2019-07-22 11:08:03.833673473 +0200 +++ new/src/hotspot/share/memory/metaspace/metachunk.cpp 2019-07-22 11:08:03.621671202 +0200 @@ -22,151 +22,148 @@ * */ + #include "precompiled.hpp" -#include "memory/allocation.hpp" + +#include "memory/metaspace/chunkLevel.hpp" +#include "memory/metaspace/constants.hpp" #include "memory/metaspace/metachunk.hpp" -#include "memory/metaspace/occupancyMap.hpp" +#include "memory/metaspace/metaspaceCommon.hpp" #include "memory/metaspace/virtualSpaceNode.hpp" + #include "utilities/align.hpp" #include "utilities/copy.hpp" #include "utilities/debug.hpp" namespace metaspace { -size_t Metachunk::object_alignment() { - // Must align pointers and sizes to 8, - // so that 64 bit types get correctly aligned. - const size_t alignment = 8; - - // Make sure that the Klass alignment also agree. - STATIC_ASSERT(alignment == (size_t)KlassAlignmentInBytes); - - return alignment; -} - -size_t Metachunk::overhead() { - return align_up(sizeof(Metachunk), object_alignment()) / BytesPerWord; -} - -// Metachunk methods - -Metachunk::Metachunk(ChunkIndex chunktype, bool is_class, size_t word_size, - VirtualSpaceNode* container) - : Metabase(word_size), - _container(container), - _top(NULL), - _sentinel(CHUNK_SENTINEL), - _chunk_type(chunktype), - _is_class(is_class), - _origin(origin_normal), - _use_count(0) -{ - _top = initial_top(); - set_is_tagged_free(false); -#ifdef ASSERT - mangle(uninitMetaWordVal); - verify(); -#endif -} +// Make sure that the Klass alignment also agree. +STATIC_ASSERT(Metachunk::allocation_alignment_bytes == (size_t)KlassAlignmentInBytes); -MetaWord* Metachunk::allocate(size_t word_size) { - MetaWord* result = NULL; - // If available, bump the pointer to allocate. 
- if (free_word_size() >= word_size) { - result = _top; - _top = _top + word_size; +void Metachunk::remove_from_list() { + if (_prev != NULL) { + _prev->set_next(_next); } - return result; + if (_next != NULL) { + _next->set_prev(_prev); + } + _prev = _next = NULL; } -// _bottom points to the start of the chunk including the overhead. -size_t Metachunk::used_word_size() const { - return pointer_delta(_top, bottom(), sizeof(MetaWord)); -} +// Ensure that chunk is committed up to at least word_size words. +bool Metachunk::ensure_committed(size_t new_committed_words) { + + assert(new_committed_words <= word_size(), "too much."); + + if (new_committed_words <= committed_words()) { + return true; + } + + // Note: we may commit more than the area of our own chunk and that is okay. + MetaWord* const commit_top = align_down(base() + committed_words(), constants::commit_granule_words); + MetaWord* const new_commit_top = align_up(base() + new_committed_words, constants::commit_granule_words); + + { + // Expand lock from here on. + MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag); + if (!_vsnode->ensure_range_is_committed(commit_top, new_commit_top - commit_top)) { + return false; + } + // Remember how far we have committed. + _committed_words = new_commit_top - base(); + if (_committed_words > word_size()) { + _committed_words = word_size(); + } + } + + return true; -size_t Metachunk::free_word_size() const { - return pointer_delta(end(), _top, sizeof(MetaWord)); } -void Metachunk::print_on(outputStream* st) const { - st->print_cr("Metachunk:" - " bottom " PTR_FORMAT " top " PTR_FORMAT - " end " PTR_FORMAT " size " SIZE_FORMAT " (%s)", - p2i(bottom()), p2i(_top), p2i(end()), word_size(), - chunk_size_name(get_chunk_type())); - if (Verbose) { - st->print_cr(" used " SIZE_FORMAT " free " SIZE_FORMAT, - used_word_size(), free_word_size()); + +// Allocate word_size words from this chunk. +// +// May cause memory to be committed. That may fail if we hit a commit limit. In that case, +// NULL is returned and p_did_hit_commit_limit will be set to true. +// If the remainder portion of the chunk was too small to hold the allocation, +// NULL is returned and p_did_hit_commit_limit will be set to false. +MetaWord* Metachunk::allocate(size_t word_size, bool* p_did_hit_commit_limit) { + + size_t request_word_size = align_up(word_size, allocation_alignment_words); + + // Space enough left? + if (free_words() < request_word_size) { + *p_did_hit_commit_limit = false; + return NULL; + } + + // Expand committed region if necessary. + if (ensure_committed(used_words() + request_word_size) == false) { + *p_did_hit_commit_limit = true; + return NULL; } + + MetaWord* const p = top(); + + _used_words += request_word_size; + + return p; + } #ifdef ASSERT -void Metachunk::mangle(juint word_value) { - // Overwrite the payload of the chunk and not the links that - // maintain list of chunks. 
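  // Worked example for Metachunk::ensure_committed() above (illustration only): committing
  // happens in whole commit granules. Assuming, say, constants::commit_granule_words == 16K
  // words (the real value lives in constants.hpp, not shown in this hunk) and a 64K-word
  // chunk that starts on a granule boundary:
  //   ensure_committed(10)  commits the first granule, _committed_words becomes 16K;
  //   ensure_committed(20K) extends the committed range, _committed_words becomes 32K;
  //   ensure_committed(64K) commits the rest, _committed_words becomes word_size();
  //   anything larger than word_size() is illegal and trips the "too much." assert.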
- HeapWord* start = (HeapWord*)initial_top(); - size_t size = word_size() - overhead(); - Copy::fill_to_words(start, size, word_value); -} - -void Metachunk::verify() const { - assert(is_valid_sentinel(), "Chunk " PTR_FORMAT ": sentinel invalid", p2i(this)); - const ChunkIndex chunk_type = get_chunk_type(); - assert(is_valid_chunktype(chunk_type), "Chunk " PTR_FORMAT ": Invalid chunk type.", p2i(this)); - if (chunk_type != HumongousIndex) { - assert(word_size() == get_size_for_nonhumongous_chunktype(chunk_type, is_class()), - "Chunk " PTR_FORMAT ": wordsize " SIZE_FORMAT " does not fit chunk type %s.", - p2i(this), word_size(), chunk_size_name(chunk_type)); - } - assert(is_valid_chunkorigin(get_origin()), "Chunk " PTR_FORMAT ": Invalid chunk origin.", p2i(this)); - assert(bottom() <= _top && _top <= (MetaWord*)end(), - "Chunk " PTR_FORMAT ": Chunk top out of chunk bounds.", p2i(this)); - - // For non-humongous chunks, starting address shall be aligned - // to its chunk size. Humongous chunks start address is - // aligned to specialized chunk size. - const size_t required_alignment = - (chunk_type != HumongousIndex ? word_size() : get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class())) * sizeof(MetaWord); - assert(is_aligned((address)this, required_alignment), - "Chunk " PTR_FORMAT ": (size " SIZE_FORMAT ") not aligned to " SIZE_FORMAT ".", - p2i(this), word_size() * sizeof(MetaWord), required_alignment); -} +void Metachunk::verify(bool slow) const { + + // Note: only call this on a life Metachunk. + chklvl::check_valid_level(level()); + + assert(base() != NULL, "No base ptr"); + assert(committed_words() >= used_words(), "Sanity"); + assert(end() >= committed_words(), "Sanity"); + + // Test base pointer + assert(vsnode() != NULL, "No space"); + vsnode()->check_pointer(base()); + assert(base() != NULL, "Base pointer NULL"); + + // Starting address shall be aligned to chunk size. + const size_t required_alignment = word_size() * sizeof(MetaWord); + assert_is_aligned(base(), required_alignment); + + // Used words + assert(used_words() < word_size(), "oob"); + + // If we are not a root chunk, we shall have a reference to a tree node + assert(tree_node_ref() != 0 || level() == chklvl::ROOT_CHUNK_LEVEL, "No parent node."); + +} #endif // ASSERT -// Helper, returns a descriptive name for the given index. -const char* chunk_size_name(ChunkIndex index) { - switch (index) { - case SpecializedIndex: - return "specialized"; - case SmallIndex: - return "small"; - case MediumIndex: - return "medium"; - case HumongousIndex: - return "humongous"; - default: - return "Invalid index"; + +#ifdef ASSERT + +bool MetachunkList::contains(const Metachunk* c) const { + for (Metachunk* c2 = first(); c2 != NULL; c2 = c2->next()) { + if (c == c2) { + return true; + } } + return false; } -#ifdef ASSERT -void do_verify_chunk(Metachunk* chunk) { - guarantee(chunk != NULL, "Sanity"); - // Verify chunk itself; then verify that it is consistent with the - // occupany map of its containing node. 
- chunk->verify(); - VirtualSpaceNode* const vsn = chunk->container(); - OccupancyMap* const ocmap = vsn->occupancy_map(); - ocmap->verify_for_chunk(chunk); -} -#endif - -void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) { - chunk->set_is_tagged_free(!inuse); - OccupancyMap* const ocmap = chunk->container()->occupancy_map(); - ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse); +void MetachunkList::verify(bool slow) const { + int num = 0; + for (Metachunk* c = first(); c != NULL; c = c->next()) { + num ++; + if (slow) { + c->verify(false); + } + _num.check(num); + } } +#endif // ASSERT + } // namespace metaspace --- old/src/hotspot/share/memory/metaspace/metachunk.hpp 2019-07-22 11:08:04.325678742 +0200 +++ new/src/hotspot/share/memory/metaspace/metachunk.hpp 2019-07-22 11:08:04.113676472 +0200 @@ -24,12 +24,14 @@ #ifndef SHARE_MEMORY_METASPACE_METACHUNK_HPP #define SHARE_MEMORY_METASPACE_METACHUNK_HPP -#include "memory/metaspace/metabase.hpp" -#include "memory/metaspace/metaspaceCommon.hpp" + +#include "memory/metaspace/counter.hpp" +#include "memory/metaspace/abstractPool.hpp" +#include "memory/metaspace/chunkLevel.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" -class MetachunkTest; + namespace metaspace { @@ -39,135 +41,194 @@ // Metachunks are reused (when freed are put on a global freelist) and // have no permanent association to a SpaceManager. -// +--------------+ <- end --+ --+ -// | | | | -// | | | free | -// | | | | -// | | | | size | capacity -// | | | | -// | | <- top -- + | -// | | | | -// | | | used | -// | | | | -// | | | | -// +--------------+ <- bottom --+ --+ - -enum ChunkOrigin { - // Chunk normally born (via take_from_committed) - origin_normal = 1, - // Chunk was born as padding chunk - origin_pad = 2, - // Chunk was born as leftover chunk in VirtualSpaceNode::retire - origin_leftover = 3, - // Chunk was born as result of a merge of smaller chunks - origin_merge = 4, - // Chunk was born as result of a split of a larger chunk - origin_split = 5, - - origin_minimum = origin_normal, - origin_maximum = origin_split, - origins_count = origin_maximum + 1 -}; - -inline bool is_valid_chunkorigin(ChunkOrigin origin) { - return origin == origin_normal || - origin == origin_pad || - origin == origin_leftover || - origin == origin_merge || - origin == origin_split; -} - -class Metachunk : public Metabase { - - friend class ::MetachunkTest; - - // The VirtualSpaceNode containing this chunk. - VirtualSpaceNode* const _container; - - // Current allocation top. - MetaWord* _top; - - // A 32bit sentinel for debugging purposes. - enum { CHUNK_SENTINEL = 0x4d4554EF, // "MET" - CHUNK_SENTINEL_INVALID = 0xFEEEEEEF - }; - - uint32_t _sentinel; - - const ChunkIndex _chunk_type; - const bool _is_class; - // Whether the chunk is free (in freelist) or in use by some class loader. - bool _is_tagged_free; - - ChunkOrigin _origin; - int _use_count; - - MetaWord* initial_top() const { return (MetaWord*)this + overhead(); } - MetaWord* top() const { return _top; } +// +--------------+ <- end ----+ --+ +// | | | | +// | | | free | +// | | | +// | | | | size (aka capacity) +// | | | | +// | ----------- | <- top -- + | +// | | | | +// | | | used | +// +--------------+ <- start -- + -- + + +// Note: this is a chunk **descriptor**. The real Payload area lives in metaspace, +// this class lives somewhere else. +class Metachunk { + + // Todo: compact this node. A lot of things can be expressed more tighter. 
+ + // A chunk header is kept in a list: + // - in the list of used chunks inside a SpaceManager, if it is in use + // - in the list of free chunks inside a ChunkManager, if it is free + // - in the freelist of dead headers inside the MetaChunkHeaderPool, + // if it is dead (e.g. result of chunk merging). + Metachunk* _prev; + Metachunk* _next; + + chklvl_t _level; // aka size. + + // true: free, owned by ChunkManager + // false: in-use, owned by SpaceManager + // if dead, meaningless + bool _is_free; + + // start of chunk memory; NULL if dead. + MetaWord* _base; + + // Used words. + size_t _used_words; + + // Guaranteed-to-be-committed-words, counted from base + // (This is a performance optimization. The underlying VirtualSpaceNode knows + // which granules are committed; but we want to avoid asking it unnecessarily + // in Metachunk::allocate(), so we keep a limit until which we are guaranteed + // to have committed memory under us.) + size_t _committed_words; + + // the chunk tree node this header is hanging under; NULL if dead. + u2 _tree_node_ref; + + // We need unfortunately a back link to the virtual space node + // for splitting and merging nodes. + VirtualSpaceNode* _vsnode; + + MetaWord* top() const { return base() + _used_words; } + +public: + + Metachunk() + : _prev(NULL), _next(NULL), + _level(chklvl::ROOT_CHUNK_LEVEL), + _is_free(true), + _base(NULL), + _used_words(0), + _committed_words(0), + _tree_node_ref(0), + _vsnode(NULL) + {} + + size_t word_size() const { return chklvl::word_size_for_level(_level); } + + MetaWord* base() const { return _base; } + void set_base(MetaWord* p) { _base = p; } + MetaWord* end() const { return base() + word_size(); } + + void set_prev(Metachunk* c) { _prev = c; } + Metachunk* prev() const { return _prev; } + void set_next(Metachunk* c) { _next = c; } + Metachunk* next() const { return _next; } + // Remove chunk from whatever list it lives in by wiring next with previous. + void remove_from_list(); + + bool is_free() const { return _is_free; } + bool is_in_use() const { return !_is_free; } + void set_free(bool v) { _is_free = v; } + + + void inc_level() { _level ++; DEBUG_ONLY(chklvl::is_valid_level(_level);) } + void dec_level() { _level --; DEBUG_ONLY(chklvl::is_valid_level(_level);) } + void set_level(chklvl_t v) { _level = v; DEBUG_ONLY(chklvl::is_valid_level(_level);) } + chklvl_t level() const { return _level; } + + void set_tree_node_ref(u2 v) { _tree_node_ref = v; } + u2 tree_node_ref() const { return _tree_node_ref; } + + VirtualSpaceNode* vsnode() const { return _vsnode; } + void set_vsnode(VirtualSpaceNode* n) { _vsnode = n; } + + size_t used_words() const { return _used_words; } + size_t free_words() const { return word_size() - used_words(); } + size_t free_below_committed_words() const { return committed_words() - used_words(); } + void reset_used_words() { _used_words = 0; } + + size_t committed_words() const { return _committed_words; } + void set_committed_words(size_t v) { _committed_words = v; } + bool is_fully_committed() const { return committed_words() == word_size(); } + + // Ensure that chunk is committed up to at least word_size words. + // Fails if we hit a commit limit. + bool ensure_committed(size_t word_size); + + // Alignment of an allocation. + static const size_t allocation_alignment_bytes = 8; + static const size_t allocation_alignment_words = allocation_alignment_bytes / BytesPerWord; + + // Allocation from a chunk + + // Allocate word_size words from this chunk. + // + // May cause memory to be committed. 
That may fail if we hit a commit limit. In that case, + // NULL is returned and p_did_hit_commit_limit will be set to true. + // If the remainder portion of the chunk was too small to hold the allocation, + // NULL is returned and p_did_hit_commit_limit will be set to false. + MetaWord* allocate(size_t word_size, bool* p_did_hit_commit_limit); + + // Wipe this object to look as if it were default constructed. + void wipe() { + _prev = NULL; _next = NULL; + _level = chklvl::ROOT_CHUNK_LEVEL; + _is_free = true; + _base = NULL; + _used_words = 0; + _committed_words = 0; + _tree_node_ref = 0; + _vsnode = NULL; + } - public: - // Metachunks are allocated out of a MetadataVirtualSpace and - // and use some of its space to describe itself (plus alignment - // considerations). Metadata is allocated in the rest of the chunk. - // This size is the overhead of maintaining the Metachunk within - // the space. + //// Debug stuff //// + DEBUG_ONLY(void verify(bool slow) const;) - // Alignment of each allocation in the chunks. - static size_t object_alignment(); - - // Size of the Metachunk header, in words, including alignment. - static size_t overhead(); - - Metachunk(ChunkIndex chunktype, bool is_class, size_t word_size, VirtualSpaceNode* container); - - MetaWord* allocate(size_t word_size); - - VirtualSpaceNode* container() const { return _container; } - - MetaWord* bottom() const { return (MetaWord*) this; } - - // Reset top to bottom so chunk can be reused. - void reset_empty() { _top = initial_top(); clear_next(); clear_prev(); } - bool is_empty() { return _top == initial_top(); } - - // used (has been allocated) - // free (available for future allocations) - size_t word_size() const { return size(); } - size_t used_word_size() const; - size_t free_word_size() const; - - bool is_tagged_free() { return _is_tagged_free; } - void set_is_tagged_free(bool v) { _is_tagged_free = v; } - - bool contains(const void* ptr) { return bottom() <= ptr && ptr < _top; } +}; - void print_on(outputStream* st) const; - bool is_valid_sentinel() const { return _sentinel == CHUNK_SENTINEL; } - void remove_sentinel() { _sentinel = CHUNK_SENTINEL_INVALID; } +class MetachunkList { - int get_use_count() const { return _use_count; } - void inc_use_count() { _use_count ++; } + Metachunk* _first; + IntCounter _num; - ChunkOrigin get_origin() const { return _origin; } - void set_origin(ChunkOrigin orig) { _origin = orig; } +public: - ChunkIndex get_chunk_type() const { return _chunk_type; } - bool is_class() const { return _is_class; } + MetachunkList() : _first(NULL), _num() {} - DEBUG_ONLY(void mangle(juint word_value);) - DEBUG_ONLY(void verify() const;) + Metachunk* first() const { return _first; } + int size() const { return _num.get(); } + + void add(Metachunk* c) { + c->set_next(_first); + _first = c; + _num.increment(); + } + + Metachunk* remove_first() { + Metachunk* c = _first; + if (c != NULL) { + _first = c->next(); + } + _num.decrement(); + return c; + } + + void remove(Metachunk* c) { + assert(contains(c), "Does not contain this chunk"); + c->remove_from_list(); + _num.decrement(); + } + + // Manually decrement counter; needed for cases where chunks + // have been manually removed from the list without informing + // the list, e.g. chunk merging, see chunkManager::return_chunk(). + void dec_counter_by(int v) { + _num.decrement_by(v); + } + +#ifdef ASSERT + bool contains(const Metachunk* c) const; + void verify(bool slow) const; +#endif }; - -// Helper function that does a bunch of checks for a chunk. 
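  // Worked example for Metachunk::allocate() (illustration only, numbers made up): take a
  // 4096-word chunk with 1024 words committed and 900 words used.
  //   allocate(100):  900 + 100 <= 1024, nothing new to commit; the old top is returned
  //                   and _used_words becomes 1000.
  //   allocate(500):  1000 + 500 > 1024, so ensure_committed(1500) runs first; if that
  //                   hits a commit limit, NULL is returned, *p_did_hit_commit_limit is
  //                   set to true and _used_words stays at 1000.
  //   allocate(5000): larger than free_words() == 3096, so NULL is returned and
  //                   *p_did_hit_commit_limit is set to false -- the caller needs a new chunk.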
-DEBUG_ONLY(void do_verify_chunk(Metachunk* chunk);) - -// Given a Metachunk, update its in-use information (both in the -// chunk and the occupancy map). -void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse); - } // namespace metaspace #endif // SHARE_MEMORY_METASPACE_METACHUNK_HPP --- old/src/hotspot/share/memory/metaspace/metaspaceCommon.cpp 2019-07-22 11:08:04.821684054 +0200 +++ new/src/hotspot/share/memory/metaspace/metaspaceCommon.cpp 2019-07-22 11:08:04.605681741 +0200 @@ -130,70 +130,6 @@ } } -// Returns size of this chunk type. -size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) { - assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type."); - size_t size = 0; - if (is_class) { - switch(chunktype) { - case SpecializedIndex: size = ClassSpecializedChunk; break; - case SmallIndex: size = ClassSmallChunk; break; - case MediumIndex: size = ClassMediumChunk; break; - default: - ShouldNotReachHere(); - } - } else { - switch(chunktype) { - case SpecializedIndex: size = SpecializedChunk; break; - case SmallIndex: size = SmallChunk; break; - case MediumIndex: size = MediumChunk; break; - default: - ShouldNotReachHere(); - } - } - return size; -} - -ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) { - if (is_class) { - if (size == ClassSpecializedChunk) { - return SpecializedIndex; - } else if (size == ClassSmallChunk) { - return SmallIndex; - } else if (size == ClassMediumChunk) { - return MediumIndex; - } else if (size > ClassMediumChunk) { - // A valid humongous chunk size is a multiple of the smallest chunk size. - assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size"); - return HumongousIndex; - } - } else { - if (size == SpecializedChunk) { - return SpecializedIndex; - } else if (size == SmallChunk) { - return SmallIndex; - } else if (size == MediumChunk) { - return MediumIndex; - } else if (size > MediumChunk) { - // A valid humongous chunk size is a multiple of the smallest chunk size. - assert(is_aligned(size, SpecializedChunk), "Invalid chunk size"); - return HumongousIndex; - } - } - ShouldNotReachHere(); - return (ChunkIndex)-1; -} - -ChunkIndex next_chunk_index(ChunkIndex i) { - assert(i < NumberOfInUseLists, "Out of bound"); - return (ChunkIndex) (i+1); -} - -ChunkIndex prev_chunk_index(ChunkIndex i) { - assert(i > ZeroIndex, "Out of bound"); - return (ChunkIndex) (i-1); -} - const char* loaders_plural(uintx num) { return num == 1 ? "loader" : "loaders"; } --- old/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp 2019-07-22 11:08:05.309689280 +0200 +++ new/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp 2019-07-22 11:08:05.093686967 +0200 @@ -33,14 +33,6 @@ namespace metaspace { -enum ChunkSizes { // in words. - ClassSpecializedChunk = 128, - SpecializedChunk = 128, - ClassSmallChunk = 256, - SmallChunk = 512, - ClassMediumChunk = 4 * K, - MediumChunk = 8 * K -}; // Print a size, in words, scaled. void print_scaled_words(outputStream* st, size_t word_size, size_t scale = 0, int width = -1); @@ -98,48 +90,6 @@ extern internal_statistics_t g_internal_statistics; #endif -// ChunkIndex defines the type of chunk. -// Chunk types differ by size: specialized < small < medium, chunks -// larger than medium are humongous chunks of varying size. 
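For contrast with the fixed chunk sizes being removed here, a sketch of how the new chunk levels presumably map to word sizes; chunkLevel.hpp is not part of this hunk, so the halving scheme and the helper below are assumptions based on the buddy-style split/merge logic above.

  // Illustration only; not the real definition from chunkLevel.hpp.
  static size_t word_size_for_level_sketch(chklvl_t level, size_t root_chunk_word_size) {
    // ROOT_CHUNK_LEVEL denotes the largest chunk; each following level halves the size,
    // which is what the binary-tree merge and split logic relies on.
    return root_chunk_word_size >> (level - chklvl::ROOT_CHUNK_LEVEL);
  }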
-enum ChunkIndex { - ZeroIndex = 0, - SpecializedIndex = ZeroIndex, - SmallIndex = SpecializedIndex + 1, - MediumIndex = SmallIndex + 1, - HumongousIndex = MediumIndex + 1, - NumberOfFreeLists = 3, - NumberOfInUseLists = 4 -}; - -// Utility functions. -size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunk_type, bool is_class); -ChunkIndex get_chunk_type_by_size(size_t size, bool is_class); - -ChunkIndex next_chunk_index(ChunkIndex i); -ChunkIndex prev_chunk_index(ChunkIndex i); -// Returns a descriptive name for a chunk type. -const char* chunk_size_name(ChunkIndex index); - -// Verify chunk sizes. -inline bool is_valid_chunksize(bool is_class, size_t size) { - const size_t reasonable_maximum_humongous_chunk_size = 1 * G; - return is_aligned(size, sizeof(MetaWord)) && - size < reasonable_maximum_humongous_chunk_size && - is_class ? - (size == ClassSpecializedChunk || size == ClassSmallChunk || size >= ClassMediumChunk) : - (size == SpecializedChunk || size == SmallChunk || size >= MediumChunk); -} - -// Verify chunk type. -inline bool is_valid_chunktype(ChunkIndex index) { - return index == SpecializedIndex || index == SmallIndex || - index == MediumIndex || index == HumongousIndex; -} - -inline bool is_valid_nonhumongous_chunktype(ChunkIndex index) { - return is_valid_chunktype(index) && index != HumongousIndex; -} - // Pretty printing helpers const char* classes_plural(uintx num); const char* loaders_plural(uintx num); --- old/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp 2019-07-22 11:08:05.805694591 +0200 +++ new/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp 2019-07-22 11:08:05.589692279 +0200 @@ -24,9 +24,11 @@ */ #include "precompiled.hpp" -#include "memory/metaspace/metachunk.hpp" + +#include "memory/metaspace/chunkLevel.hpp" #include "memory/metaspace/metaspaceCommon.hpp" #include "memory/metaspace/metaspaceStatistics.hpp" + #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/ostream.hpp" @@ -35,116 +37,77 @@ // FreeChunksStatistics methods -FreeChunksStatistics::FreeChunksStatistics() -: _num(0), _cap(0) -{} - -void FreeChunksStatistics::reset() { - _num = 0; _cap = 0; -} - -void FreeChunksStatistics::add(uintx n, size_t s) { - _num += n; _cap += s; -} - -void FreeChunksStatistics::add(const FreeChunksStatistics& other) { - _num += other._num; - _cap += other._cap; -} - void FreeChunksStatistics::print_on(outputStream* st, size_t scale) const { - st->print(UINTX_FORMAT, _num); + st->print(UINTX_FORMAT, num); st->print(" chunks, total capacity "); - print_scaled_words(st, _cap, scale); + print_scaled_words(st, word_size, scale); } // ChunkManagerStatistics methods void ChunkManagerStatistics::reset() { - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { - _chunk_stats[i].reset(); + for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) { + chunk_stats[l].reset(); } } size_t ChunkManagerStatistics::total_capacity() const { - return _chunk_stats[SpecializedIndex].cap() + - _chunk_stats[SmallIndex].cap() + - _chunk_stats[MediumIndex].cap() + - _chunk_stats[HumongousIndex].cap(); + size_t cap = 0; + for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) { + cap += chunk_stats[l].word_size; + } } void ChunkManagerStatistics::print_on(outputStream* st, size_t scale) const { FreeChunksStatistics totals; - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { + totals.reset(); + for (chklvl_t l = 
chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) { st->cr(); - st->print("%12s chunks: ", chunk_size_name(i)); - if (_chunk_stats[i].num() > 0) { - st->print(UINTX_FORMAT_W(4) ", capacity ", _chunk_stats[i].num()); - print_scaled_words(st, _chunk_stats[i].cap(), scale); + st->print(SIZE_FORMAT "k chunks: ", (chklvl::word_size_for_level(l) * BytesPerWord) / K); + if (chunk_stats[l].num > 0) { + st->print(UINTX_FORMAT_W(4) ", capacity ", chunk_stats[l].num); + print_scaled_words(st, chunk_stats[l].word_size, scale); } else { st->print("(none)"); } - totals.add(_chunk_stats[i]); + totals.add(chunk_stats[l]); } st->cr(); - st->print("%19s: " UINTX_FORMAT_W(4) ", capacity=", "Total", totals.num()); - print_scaled_words(st, totals.cap(), scale); + st->print("Total: " UINTX_FORMAT_W(4) ", capacity=", totals.num()); + print_scaled_words(st, totals.word_size, scale); st->cr(); } // UsedChunksStatistics methods -UsedChunksStatistics::UsedChunksStatistics() -: _num(0), _cap(0), _used(0), _free(0), _waste(0), _overhead(0) -{} - -void UsedChunksStatistics::reset() { - _num = 0; - _cap = _overhead = _used = _free = _waste = 0; -} - -void UsedChunksStatistics::add(const UsedChunksStatistics& other) { - _num += other._num; - _cap += other._cap; - _used += other._used; - _free += other._free; - _waste += other._waste; - _overhead += other._overhead; - DEBUG_ONLY(check_sanity()); -} - void UsedChunksStatistics::print_on(outputStream* st, size_t scale) const { int col = st->position(); - st->print(UINTX_FORMAT_W(4) " chunk%s, ", _num, _num != 1 ? "s" : ""); - if (_num > 0) { + st->print(UINTX_FORMAT_W(4) " chunk%s, ", num, num != 1 ? "s" : ""); + if (num > 0) { col += 14; st->fill_to(col); - print_scaled_words(st, _cap, scale, 5); + print_scaled_words(st, cap, scale, 5); st->print(" capacity, "); col += 18; st->fill_to(col); - print_scaled_words_and_percentage(st, _used, _cap, scale, 5); + print_scaled_words_and_percentage(st, used, cap, scale, 5); st->print(" used, "); col += 20; st->fill_to(col); - print_scaled_words_and_percentage(st, _free, _cap, scale, 5); + print_scaled_words_and_percentage(st, free, cap, scale, 5); st->print(" free, "); col += 20; st->fill_to(col); - print_scaled_words_and_percentage(st, _waste, _cap, scale, 5); - st->print(" waste, "); + print_scaled_words_and_percentage(st, waste, cap, scale, 5); + st->print(" waste "); - col += 20; st->fill_to(col); - print_scaled_words_and_percentage(st, _overhead, _cap, scale, 5); - st->print(" overhead"); } DEBUG_ONLY(check_sanity()); } #ifdef ASSERT void UsedChunksStatistics::check_sanity() const { - assert(_overhead == (Metachunk::overhead() * _num), "Sanity: Overhead."); - assert(_cap == _used + _free + _waste + _overhead, "Sanity: Capacity."); + assert(cap == used + free + waste, "Sanity: Capacity."); } #endif @@ -153,30 +116,27 @@ SpaceManagerStatistics::SpaceManagerStatistics() { reset(); } void SpaceManagerStatistics::reset() { - for (int i = 0; i < NumberOfInUseLists; i ++) { - _chunk_stats[i].reset(); - _free_blocks_num = 0; _free_blocks_cap_words = 0; + for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) { + chunk_stats[l].reset(); } -} - -void SpaceManagerStatistics::add_free_blocks_info(uintx num, size_t cap) { - _free_blocks_num += num; - _free_blocks_cap_words += cap; + free_blocks_num = 0; + free_blocks_word_size = 0; } void SpaceManagerStatistics::add(const SpaceManagerStatistics& other) { - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { - 
_chunk_stats[i].add(other._chunk_stats[i]); + for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) { + chunk_stats[l].add(other.chunk_stats[l]); } - _free_blocks_num += other._free_blocks_num; - _free_blocks_cap_words += other._free_blocks_cap_words; + free_blocks_num += other.free_blocks_num; + free_blocks_word_size += other.free_blocks_word_size; } // Returns total chunk statistics over all chunk types. UsedChunksStatistics SpaceManagerStatistics::totals() const { UsedChunksStatistics stat; - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { - stat.add(_chunk_stats[i]); + stat.reset(); + for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) { + stat.add(chunk_stats[l]); } return stat; } @@ -185,16 +145,16 @@ streamIndentor sti(st); if (detailed) { st->cr_indent(); - st->print("Usage by chunk type:"); + st->print("Usage by chunk level:"); { streamIndentor sti2(st); - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { + for (chklvl_t l = chklvl::LOWEST_CHUNK_LEVEL; l <= chklvl::HIGHEST_CHUNK_LEVEL; l ++) { st->cr_indent(); - st->print("%15s: ", chunk_size_name(i)); - if (_chunk_stats[i].num() == 0) { + st->print(SIZE_FORMAT "k chunks: ", (chklvl::word_size_for_level(l) * BytesPerWord) / K); + if (chunk_stats[l].num == 0) { st->print(" (none)"); } else { - _chunk_stats[i].print_on(st, scale); + chunk_stats[l].print_on(st, scale); } } @@ -202,61 +162,44 @@ st->print("%15s: ", "-total-"); totals().print_on(st, scale); } - if (_free_blocks_num > 0) { + if (free_blocks_num > 0) { st->cr_indent(); - st->print("deallocated: " UINTX_FORMAT " blocks with ", _free_blocks_num); - print_scaled_words(st, _free_blocks_cap_words, scale); + st->print("deallocated: " UINTX_FORMAT " blocks with ", free_blocks_num); + print_scaled_words(st, free_blocks_word_size, scale); } } else { totals().print_on(st, scale); st->print(", "); - st->print("deallocated: " UINTX_FORMAT " blocks with ", _free_blocks_num); - print_scaled_words(st, _free_blocks_cap_words, scale); + st->print("deallocated: " UINTX_FORMAT " blocks with ", free_blocks_num); + print_scaled_words(st, free_blocks_word_size, scale); } } // ClassLoaderMetaspaceStatistics methods -ClassLoaderMetaspaceStatistics::ClassLoaderMetaspaceStatistics() { reset(); } - -void ClassLoaderMetaspaceStatistics::reset() { - nonclass_sm_stats().reset(); - if (Metaspace::using_class_space()) { - class_sm_stats().reset(); - } -} - // Returns total space manager statistics for both class and non-class metaspace SpaceManagerStatistics ClassLoaderMetaspaceStatistics::totals() const { SpaceManagerStatistics stats; - stats.add(nonclass_sm_stats()); - if (Metaspace::using_class_space()) { - stats.add(class_sm_stats()); - } + stats.reset(); + stats.add(sm_stats[Metaspace::ClassType]); + stats.add(sm_stats[Metaspace::NonClassType]); return stats; } -void ClassLoaderMetaspaceStatistics::add(const ClassLoaderMetaspaceStatistics& other) { - nonclass_sm_stats().add(other.nonclass_sm_stats()); - if (Metaspace::using_class_space()) { - class_sm_stats().add(other.class_sm_stats()); - } -} - void ClassLoaderMetaspaceStatistics::print_on(outputStream* st, size_t scale, bool detailed) const { streamIndentor sti(st); st->cr_indent(); if (Metaspace::using_class_space()) { st->print("Non-Class: "); } - nonclass_sm_stats().print_on(st, scale, detailed); + sm_stats[Metaspace::NonClassType].print_on(st, scale, detailed); if (detailed) { st->cr(); } if 
(Metaspace::using_class_space()) { st->cr_indent(); st->print(" Class: "); - class_sm_stats().print_on(st, scale, detailed); + sm_stats[Metaspace::ClassType].print_on(st, scale, detailed); if (detailed) { st->cr(); } --- old/src/hotspot/share/memory/metaspace/metaspaceStatistics.hpp 2019-07-22 11:08:06.301699902 +0200 +++ new/src/hotspot/share/memory/metaspace/metaspaceStatistics.hpp 2019-07-22 11:08:06.089697632 +0200 @@ -26,44 +26,39 @@ #ifndef SHARE_MEMORY_METASPACE_METASPACESTATISTICS_HPP #define SHARE_MEMORY_METASPACE_METASPACESTATISTICS_HPP +#include "memory/metaspace.hpp" // for MetadataType enum +#include "memory/metaspace/chunkLevel.hpp" #include "utilities/globalDefinitions.hpp" -#include "memory/metaspace.hpp" // for MetadataType enum -#include "memory/metaspace/metachunk.hpp" // for ChunkIndex enum class outputStream; namespace metaspace { // Contains statistics for a number of free chunks. -class FreeChunksStatistics { - uintx _num; // Number of chunks - size_t _cap; // Total capacity, in words +struct FreeChunksStatistics { -public: - FreeChunksStatistics(); + uintx num; // Number of chunks + size_t word_size; // Total capacity, in words - void reset(); + void reset() { num = 0; word_size = 0; } + + void add(uintx n, size_t s) { + num += n; word_size += s; + } - uintx num() const { return _num; } - size_t cap() const { return _cap; } + void add(const FreeChunksStatistics& other) { + num += other.num; word_size += other.word_size; + } - void add(uintx n, size_t s); - void add(const FreeChunksStatistics& other); void print_on(outputStream* st, size_t scale) const; }; // end: FreeChunksStatistics // Contains statistics for a ChunkManager. -class ChunkManagerStatistics { - - FreeChunksStatistics _chunk_stats[NumberOfInUseLists]; - -public: +struct ChunkManagerStatistics { - // Free chunk statistics, by chunk index. - const FreeChunksStatistics& chunk_stats(ChunkIndex index) const { return _chunk_stats[index]; } - FreeChunksStatistics& chunk_stats(ChunkIndex index) { return _chunk_stats[index]; } + FreeChunksStatistics chunk_stats[chklvl::NUM_CHUNK_LEVELS]; void reset(); size_t total_capacity() const; @@ -76,77 +71,43 @@ // Each chunk has a used and free portion; however, there are current chunks (serving // potential future metaspace allocations) and non-current chunks. Unused portion of the // former is counted as free, unused portion of the latter counts as waste. -class UsedChunksStatistics { - uintx _num; // Number of chunks - size_t _cap; // Total capacity in words. - size_t _used; // Total used area, in words - size_t _free; // Total free area (unused portions of current chunks), in words - size_t _waste; // Total waste area (unused portions of non-current chunks), in words - size_t _overhead; // Total sum of chunk overheads, in words. 
-
-public:
-
-  UsedChunksStatistics();
-
-  void reset();
-
-  uintx num() const { return _num; }
-
-  // Total capacity, in words
-  size_t cap() const { return _cap; }
-
-  // Total used area, in words
-  size_t used() const { return _used; }
-
-  // Total free area (unused portions of current chunks), in words
-  size_t free() const { return _free; }
-
-  // Total waste area (unused portions of non-current chunks), in words
-  size_t waste() const { return _waste; }
+struct UsedChunksStatistics {
 
-  // Total area spent in overhead (chunk headers), in words
-  size_t overhead() const { return _overhead; }
-
-  void add_num(uintx n) { _num += n; }
-  void add_cap(size_t s) { _cap += s; }
-  void add_used(size_t s) { _used += s; }
-  void add_free(size_t s) { _free += s; }
-  void add_waste(size_t s) { _waste += s; }
-  void add_overhead(size_t s) { _overhead += s; }
-
-  void add(const UsedChunksStatistics& other);
+  uintx num;     // Number of chunks
+  size_t cap;    // Total capacity in words.
+  size_t used;   // Total used area, in words
+  size_t free;   // Total free area (unused portions of current chunks), in words
+  size_t waste;  // Total waste area (unused portions of non-current chunks), in words
+
+  void reset() {
+    num = 0;
+    cap = used = free = waste = 0;
+  }
+
+  void add(const UsedChunksStatistics& other) {
+    num += other.num; cap += other.cap; used += other.used; free += other.free; waste += other.waste;
+  }
 
   void print_on(outputStream* st, size_t scale) const;
 
-#ifdef ASSERT
-  void check_sanity() const;
-#endif
+  DEBUG_ONLY(void check_sanity() const;)
 
 }; // UsedChunksStatistics
 
 // Class containing statistics for one or more space managers.
-class SpaceManagerStatistics {
-
-  UsedChunksStatistics _chunk_stats[NumberOfInUseLists];
-  uintx _free_blocks_num;
-  size_t _free_blocks_cap_words;
-
-public:
-
-  SpaceManagerStatistics();
-
-  // Chunk statistics by chunk index
-  const UsedChunksStatistics& chunk_stats(ChunkIndex index) const { return _chunk_stats[index]; }
-  UsedChunksStatistics& chunk_stats(ChunkIndex index) { return _chunk_stats[index]; }
+struct SpaceManagerStatistics {
 
-  uintx free_blocks_num () const { return _free_blocks_num; }
-  size_t free_blocks_cap_words () const { return _free_blocks_cap_words; }
+  UsedChunksStatistics chunk_stats[chklvl::NUM_CHUNK_LEVELS];
+  uintx free_blocks_num;
+  size_t free_blocks_word_size;
 
   void reset();
 
-  void add_free_blocks_info(uintx num, size_t cap);
+  void add_free_blocks_info(uintx num, size_t cap) {
+    free_blocks_num += num; free_blocks_word_size += cap;
+  }
 
-  // Returns total chunk statistics over all chunk types.
+  // Returns total chunk statistics over all chunk levels. 
UsedChunksStatistics totals() const; void add(const SpaceManagerStatistics& other); @@ -155,25 +116,19 @@ }; // SpaceManagerStatistics -class ClassLoaderMetaspaceStatistics { +struct ClassLoaderMetaspaceStatistics { - SpaceManagerStatistics _sm_stats[Metaspace::MetadataTypeCount]; - -public: - - ClassLoaderMetaspaceStatistics(); - - const SpaceManagerStatistics& sm_stats(Metaspace::MetadataType mdType) const { return _sm_stats[mdType]; } - SpaceManagerStatistics& sm_stats(Metaspace::MetadataType mdType) { return _sm_stats[mdType]; } - - const SpaceManagerStatistics& nonclass_sm_stats() const { return sm_stats(Metaspace::NonClassType); } - SpaceManagerStatistics& nonclass_sm_stats() { return sm_stats(Metaspace::NonClassType); } - const SpaceManagerStatistics& class_sm_stats() const { return sm_stats(Metaspace::ClassType); } - SpaceManagerStatistics& class_sm_stats() { return sm_stats(Metaspace::ClassType); } - - void reset(); + SpaceManagerStatistics sm_stats[Metaspace::MetadataTypeCount]; - void add(const ClassLoaderMetaspaceStatistics& other); + void reset() { + sm_stats[Metaspace::ClassType].reset(); + sm_stats[Metaspace::NonClassType].reset(); + } + + void add(const ClassLoaderMetaspaceStatistics& other) { + sm_stats[Metaspace::ClassType].add(other.sm_stats[Metaspace::ClassType]); + sm_stats[Metaspace::NonClassType].add(other.sm_stats[Metaspace::NonClassType]); + } // Returns total space manager statistics for both class and non-class metaspace SpaceManagerStatistics totals() const; --- old/src/hotspot/share/memory/metaspace/spaceManager.cpp 2019-07-22 11:08:06.793705169 +0200 +++ new/src/hotspot/share/memory/metaspace/spaceManager.cpp 2019-07-22 11:08:06.577702857 +0200 @@ -34,494 +34,179 @@ #include "runtime/atomic.hpp" #include "runtime/init.hpp" #include "services/memoryService.hpp" +#include "utilities/align.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" namespace metaspace { -#define assert_counter(expected_value, real_value, msg) \ - assert( (expected_value) == (real_value), \ - "Counter mismatch (%s): expected " SIZE_FORMAT \ - ", but got: " SIZE_FORMAT ".", msg, expected_value, \ - real_value); - -// SpaceManager methods - -size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) { - size_t chunk_sizes[] = { - specialized_chunk_size(is_class_space), - small_chunk_size(is_class_space), - medium_chunk_size(is_class_space) - }; - - // Adjust up to one of the fixed chunk sizes ... - for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) { - if (requested <= chunk_sizes[i]) { - return chunk_sizes[i]; - } - } +// Given a requested allocation size, in words, returns the minimum size, in words, of an allocation from metaspace. +// A metaspace allocation must be large enough to hold a Metablock. This is because deallocated allocations +// are kept in the block freelist. +static size_t get_allocation_word_size(size_t requested_word_size) { - // ... or return the size as a humongous chunk. 
-  return requested;
-}
+  size_t byte_size = requested_word_size * BytesPerWord;
+  byte_size = MAX2(byte_size, sizeof(Metablock));
+  byte_size = align_up(byte_size, Metachunk::allocation_alignment_bytes);
 
-size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
-  return adjust_initial_chunk_size(requested, is_class());
-}
+  const size_t word_size = byte_size / BytesPerWord;
+  assert(word_size * BytesPerWord == byte_size, "Size problem");
 
-size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
-  size_t requested;
-
-  if (is_class()) {
-    switch (type) {
-    case Metaspace::BootMetaspaceType: requested = Metaspace::first_class_chunk_word_size(); break;
-    case Metaspace::UnsafeAnonymousMetaspaceType: requested = ClassSpecializedChunk; break;
-    case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
-    default: requested = ClassSmallChunk; break;
-    }
-  } else {
-    switch (type) {
-    case Metaspace::BootMetaspaceType: requested = Metaspace::first_chunk_word_size(); break;
-    case Metaspace::UnsafeAnonymousMetaspaceType: requested = SpecializedChunk; break;
-    case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
-    default: requested = SmallChunk; break;
-    }
-  }
-
-  // Adjust to one of the fixed chunk sizes (unless humongous)
-  const size_t adjusted = adjust_initial_chunk_size(requested);
-
-  assert(adjusted != 0, "Incorrect initial chunk size. Requested: "
-         SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted);
-
-  return adjusted;
+  return word_size;
 }
 
-void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
-
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-    st->print("SpaceManager: " UINTX_FORMAT " %s chunks.",
-              num_chunks_by_type(i), chunk_size_name(i));
-  }
-
-  chunk_manager()->locked_print_free_chunks(st);
-}
+// Given a requested word size, will allocate a chunk large enough to at least fit that
+// size, but may be larger according to the rules in the ChunkAllocSequence.
+// Updates counters and adds the chunk to the head of the chunk list.
+Metachunk* SpaceManager::allocate_chunk_to_fit(size_t requested_word_size) {
 
-size_t SpaceManager::calc_chunk_size(size_t word_size) {
+  guarantee(requested_word_size < chklvl::MAX_CHUNK_WORD_SIZE,
+            "Requested size too large (" SIZE_FORMAT ").", requested_word_size);
 
-  // Decide between a small chunk and a medium chunk.  Up to
-  // _small_chunk_limit small chunks can be allocated.
-  // After that a medium chunk is preferred.
-  size_t chunk_word_size;
-
-  // Special case for unsafe anonymous metadata space.
-  // UnsafeAnonymous metadata space is usually small since it is used for
-  // class loader data's whose life cycle is governed by one class such as an
-  // unsafe anonymous class.  The majority within 1K - 2K range and
-  // rarely about 4K (64-bits JVM).
-  // Instead of jumping to SmallChunk after initial chunk exhausted, keeping allocation
-  // from SpecializeChunk up to _anon_or_delegating_metadata_specialize_chunk_limit (4)
-  // reduces space waste from 60+% to around 30%. 
- if ((_space_type == Metaspace::UnsafeAnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) && - _mdtype == Metaspace::NonClassType && - num_chunks_by_type(SpecializedIndex) < anon_and_delegating_metadata_specialize_chunk_limit && - word_size + Metachunk::overhead() <= SpecializedChunk) { - return SpecializedChunk; - } + const chklvl_t min_level = chklvl::level_fitting_word_size(requested_word_size); + chklvl_t max_level = _chunk_alloc_sequence->get_next_chunk_level(_num_chunks_total); - if (num_chunks_by_type(MediumIndex) == 0 && - num_chunks_by_type(SmallIndex) < small_chunk_limit) { - chunk_word_size = (size_t) small_chunk_size(); - if (word_size + Metachunk::overhead() > small_chunk_size()) { - chunk_word_size = medium_chunk_size(); - } - } else { - chunk_word_size = medium_chunk_size(); + if (max_level < min_level) { + max_level = min_level; } - // Might still need a humongous chunk. Enforce - // humongous allocations sizes to be aligned up to - // the smallest chunk size. - size_t if_humongous_sized_chunk = - align_up(word_size + Metachunk::overhead(), - smallest_chunk_size()); - chunk_word_size = - MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); - - assert(!SpaceManager::is_humongous(word_size) || - chunk_word_size == if_humongous_sized_chunk, - "Size calculation is wrong, word_size " SIZE_FORMAT - " chunk_word_size " SIZE_FORMAT, - word_size, chunk_word_size); - Log(gc, metaspace, alloc) log; - if (log.is_trace() && SpaceManager::is_humongous(word_size)) { - log.trace("Metadata humongous allocation:"); - log.trace(" word_size " PTR_FORMAT, word_size); - log.trace(" chunk_word_size " PTR_FORMAT, chunk_word_size); - log.trace(" chunk overhead " PTR_FORMAT, Metachunk::overhead()); - } - return chunk_word_size; -} + Metachunk* c = _chunk_manager->get_chunk(min_level, max_level); + assert(c != NULL, "Could not get a chunk"); + assert(c->level() >= min_level && c->level() <= max_level, "Sanity"); -void SpaceManager::track_metaspace_memory_usage() { - if (is_init_completed()) { - if (is_class()) { - MemoryService::track_compressed_class_memory_usage(); - } - MemoryService::track_metaspace_memory_usage(); - } -} + _num_chunks_by_level[c->level()] ++; + _num_chunks_total ++; -MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { - assert_lock_strong(_lock); - assert(vs_list()->current_virtual_space() != NULL, - "Should have been set"); - assert(current_chunk() == NULL || - current_chunk()->allocate(word_size) == NULL, - "Don't need to expand"); - MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag); - - if (log_is_enabled(Trace, gc, metaspace, freelist)) { - size_t words_left = 0; - size_t words_used = 0; - if (current_chunk() != NULL) { - words_left = current_chunk()->free_word_size(); - words_used = current_chunk()->used_word_size(); - } - log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left", - word_size, words_used, words_left); - } + _chunks.add(c); + _current_chunk = c; - // Get another chunk - size_t chunk_word_size = calc_chunk_size(word_size); - Metachunk* next = get_new_chunk(chunk_word_size); - - MetaWord* mem = NULL; - - // If a chunk was available, add it to the in-use chunk list - // and do an allocation from it. - if (next != NULL) { - // Add to this manager's list of chunks in use. - // If the new chunk is humongous, it was created to serve a single large allocation. 
In that - // case it usually makes no sense to make it the current chunk, since the next allocation would - // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly - // good chunk which could be used for more normal allocations. - bool make_current = true; - if (next->get_chunk_type() == HumongousIndex && - current_chunk() != NULL) { - make_current = false; - } - add_chunk(next, make_current); - mem = next->allocate(word_size); - } + return c; - // Track metaspace memory usage statistic. - track_metaspace_memory_usage(); - - return mem; } -void SpaceManager::print_on(outputStream* st) const { - SpaceManagerStatistics stat; - add_to_statistics(&stat); // will lock _lock. - stat.print_on(st, 1*K, false); +void SpaceManager::create_block_freelist() { + assert(_block_freelist == NULL, "Only call once"); + _block_freelist = new BlockFreelist(); } -SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, - Metaspace::MetaspaceType space_type,// - Mutex* lock) : - _lock(lock), - _mdtype(mdtype), - _space_type(space_type), - _chunk_list(NULL), - _current_chunk(NULL), - _overhead_words(0), - _capacity_words(0), - _used_words(0), - _block_freelists(NULL) { - Metadebug::init_allocation_fail_alot_count(); - memset(_num_chunks_by_type, 0, sizeof(_num_chunks_by_type)); - log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this)); -} - -void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) { - - assert_lock_strong(MetaspaceExpand_lock); - - _capacity_words += new_chunk->word_size(); - _overhead_words += Metachunk::overhead(); - DEBUG_ONLY(new_chunk->verify()); - _num_chunks_by_type[new_chunk->get_chunk_type()] ++; - - // Adjust global counters: - MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size()); - MetaspaceUtils::inc_overhead(mdtype(), Metachunk::overhead()); -} - -void SpaceManager::account_for_allocation(size_t words) { - // Note: we should be locked with the ClassloaderData-specific metaspace lock. - // We may or may not be locked with the global metaspace expansion lock. - assert_lock_strong(lock()); - - // Add to the per SpaceManager totals. This can be done non-atomically. - _used_words += words; - - // Adjust global counters. This will be done atomically. - MetaspaceUtils::inc_used(mdtype(), words); -} - -void SpaceManager::account_for_spacemanager_death() { - - assert_lock_strong(MetaspaceExpand_lock); - - MetaspaceUtils::dec_capacity(mdtype(), _capacity_words); - MetaspaceUtils::dec_overhead(mdtype(), _overhead_words); - MetaspaceUtils::dec_used(mdtype(), _used_words); +SpaceManager::SpaceManager(ChunkManager* chunk_manager, const ChunkAllocSequence* alloc_sequence, Mutex* lock) + : _lock(lock), + _chunk_manager(chunk_manager), + _chunk_alloc_sequence(alloc_sequence), + _chunks(), + _current_chunk(NULL), + _block_freelist(NULL), + _num_chunks_by_level { 0 }, + _num_chunks_total(0) +{ } SpaceManager::~SpaceManager() { - // This call this->_lock which can't be done while holding MetaspaceExpand_lock - DEBUG_ONLY(verify_metrics()); + assert_lock_strong(lock()); MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag); - account_for_spacemanager_death(); - - Log(gc, metaspace, freelist) log; - if (log.is_trace()) { - log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this)); - ResourceMark rm; - LogStream ls(log.trace()); - locked_print_chunks_in_use_on(&ls); - if (block_freelists() != NULL) { - block_freelists()->print_on(&ls); - } + // Return all chunks to our chunk manager. 
+ // Note: this destroys the _chunks list. + Metachunk* c = _chunks.first(); + Metachunk* c2 = NULL; + while(c) { + c2 = c->next(); + _chunk_manager->return_chunk(c); + c = c2; } - // Add all the chunks in use by this space manager - // to the global list of free chunks. - - // Follow each list of chunks-in-use and add them to the - // free lists. Each list is NULL terminated. - chunk_manager()->return_chunk_list(chunk_list()); -#ifdef ASSERT - _chunk_list = NULL; - _current_chunk = NULL; -#endif - #ifdef ASSERT EVERY_NTH(VerifyMetaspaceInterval) - chunk_manager()->locked_verify(true); + chunk_manager()->verify(true); END_EVERY_NTH #endif - if (_block_freelists != NULL) { - delete _block_freelists; - } + delete _block_freelist; + } -void SpaceManager::deallocate(MetaWord* p, size_t word_size) { - assert_lock_strong(lock()); - // Allocations and deallocations are in raw_word_size - size_t raw_word_size = get_allocation_word_size(word_size); - // Lazily create a block_freelist - if (block_freelists() == NULL) { - _block_freelists = new BlockFreelist(); +// The current chunk is unable to service a request. The remainder of the chunk is +// chopped into blocks and fed into the _block_freelists, in the hope of later reuse. +void SpaceManager::retire_current_chunk() { + Metachunk* c = _current_chunk; + assert(c != NULL, "Sanity"); + assert(c->used_words() > 0, "Why do we retire an empty chunk?"); + size_t remaining_words = c->free_below_committed_words(); + if (remaining_words >= SmallBlocks::small_block_min_size()) { + bool did_hit_limit = false; + MetaWord* ptr = c->allocate(remaining_words, &did_hit_limit); + assert(ptr != NULL && did_hit_limit == false, "Should have worked"); + deallocate(ptr, remaining_words); } - block_freelists()->return_block(p, raw_word_size); - DEBUG_ONLY(Atomic::inc(&(g_internal_statistics.num_deallocs))); } -// Adds a chunk to the list of chunks in use. -void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) { +// Allocate memory from Metaspace. +// 1) Attempt to allocate from the dictionary of deallocated blocks. +// 2) Failing that, attempt to allocate from the current chunk. If this +// fails because the chunk needed to be committed and we hit a commit limit, return NULL. +// 3) Attempt to get a new chunk and allocate from that chunk. Again, we may hit a commit +// limit, in which case we return NULL. +MetaWord* SpaceManager::allocate(size_t requested_word_size) { - assert_lock_strong(_lock); - assert(new_chunk != NULL, "Should not be NULL"); - assert(new_chunk->next() == NULL, "Should not be on a list"); - - new_chunk->reset_empty(); - - // Find the correct list and and set the current - // chunk for that list. - ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size()); - - if (make_current) { - // If we are to make the chunk current, retire the old current chunk and replace - // it with the new chunk. - retire_current_chunk(); - set_current_chunk(new_chunk); - } + MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag); - // Add the new chunk at the head of its respective chunk list. - new_chunk->set_next(_chunk_list); - _chunk_list = new_chunk; - - // Adjust counters. 
-  account_for_new_chunk(new_chunk);
-
-  assert(new_chunk->is_empty(), "Not ready for reuse");
-  Log(gc, metaspace, freelist) log;
-  if (log.is_trace()) {
-    log.trace("SpaceManager::added chunk: ");
-    ResourceMark rm;
-    LogStream ls(log.trace());
-    new_chunk->print_on(&ls);
-    chunk_manager()->locked_print_free_chunks(&ls);
-  }
-}
+  const size_t word_size = get_allocation_word_size(requested_word_size);
 
-void SpaceManager::retire_current_chunk() {
-  if (current_chunk() != NULL) {
-    size_t remaining_words = current_chunk()->free_word_size();
-    if (remaining_words >= SmallBlocks::small_block_min_size()) {
-      MetaWord* ptr = current_chunk()->allocate(remaining_words);
-      deallocate(ptr, remaining_words);
-      account_for_allocation(remaining_words);
-    }
-  }
-}
+  MetaWord* p = NULL;
 
-Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
-  // Get a chunk from the chunk freelist
-  Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
-
-  if (next == NULL) {
-    next = vs_list()->get_new_chunk(chunk_word_size,
-                                    medium_chunk_bunch());
-  }
+  bool did_hit_limit = false;
 
-  Log(gc, metaspace, alloc) log;
-  if (log.is_trace() && next != NULL &&
-      SpaceManager::is_humongous(next->word_size())) {
-    log.trace("  new humongous chunk word size " PTR_FORMAT, next->word_size());
+  // Allocate first chunk if needed.
+  if (_current_chunk == NULL) {
+    Metachunk* c = allocate_chunk_to_fit(word_size);
+    assert(c != NULL && _chunks.size() == 1 && c == _current_chunk, "Should be");
   }
 
-  return next;
-}
-
-MetaWord* SpaceManager::allocate(size_t word_size) {
-  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
-  size_t raw_word_size = get_allocation_word_size(word_size);
-  BlockFreelist* fl = block_freelists();
-  MetaWord* p = NULL;
+  // 1) Attempt to allocate from the dictionary of deallocated blocks.
 
   // Allocation from the dictionary is expensive in the sense that
   // the dictionary has to be searched for a size.  Don't allocate
   // from the dictionary until it starts to get fat.  Is this
   // a reasonable policy?  Maybe an skinny dictionary is fast enough
   // for allocations.  Do some profiling.  JJJ
 
-  if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
-    p = fl->get_block(raw_word_size);
+  if (_block_freelist != NULL && _block_freelist->total_size() > constants::allocation_from_dictionary_limit) {
+    p = _block_freelist->get_block(word_size);
     if (p != NULL) {
       DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs_from_deallocated_blocks));
     }
   }
 
-  if (p == NULL) {
-    p = allocate_work(raw_word_size);
-  }
-#ifdef ASSERT
-  EVERY_NTH(VerifyMetaspaceInterval)
-    verify_metrics_locked();
-  END_EVERY_NTH
-#endif
-
-  return p;
-}
-
-// Returns the address of spaced allocated for "word_size".
-// This methods does not know about blocks (Metablocks)
-MetaWord* SpaceManager::allocate_work(size_t word_size) {
-  assert_lock_strong(lock());
-#ifdef ASSERT
-  if (Metadebug::test_metadata_failure()) {
-    return NULL;
+  // 2) Attempt to allocate from the current chunk.
+  if (p == NULL && !did_hit_limit) {
+    p = _current_chunk->allocate(word_size, &did_hit_limit);
   }
-#endif
-  // Is there space in the current chunk?
-  MetaWord* result = NULL;
-  if (current_chunk() != NULL) {
-    result = current_chunk()->allocate(word_size);
-  }
+  // 3) Attempt to get a new chunk and allocate from that chunk.
+  if (p == NULL && !did_hit_limit) {
 
-  if (result == NULL) {
-    result = grow_and_allocate(word_size);
-  }
+    // Old chunk is too small to hold requested size? 
+ assert(_current_chunk->free_words() < word_size, "Sanity"); - if (result != NULL) { - account_for_allocation(word_size); - } + // Retire the old chunk. This will put all remainder space (committed + // space only) into the block freelist. + retire_current_chunk(); + assert(_current_chunk->free_below_committed_words() == 0, "Sanity"); - return result; -} + // Allocate a new chunk. + Metachunk* c = allocate_chunk_to_fit(word_size); + assert(c != NULL && _chunks.size() > 0 && c == _current_chunk, "Should be"); -void SpaceManager::verify() { - Metachunk* curr = chunk_list(); - while (curr != NULL) { - DEBUG_ONLY(do_verify_chunk(curr);) - assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use."); - curr = curr->next(); - } -} - -void SpaceManager::verify_chunk_size(Metachunk* chunk) { - assert(is_humongous(chunk->word_size()) || - chunk->word_size() == medium_chunk_size() || - chunk->word_size() == small_chunk_size() || - chunk->word_size() == specialized_chunk_size(), - "Chunk size is wrong"); - return; -} + p = _current_chunk->allocate(word_size, &did_hit_limit); -void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const { - assert_lock_strong(lock()); - Metachunk* chunk = chunk_list(); - while (chunk != NULL) { - UsedChunksStatistics& chunk_stat = out->chunk_stats(chunk->get_chunk_type()); - chunk_stat.add_num(1); - chunk_stat.add_cap(chunk->word_size()); - chunk_stat.add_overhead(Metachunk::overhead()); - chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead()); - if (chunk != current_chunk()) { - chunk_stat.add_waste(chunk->free_word_size()); - } else { - chunk_stat.add_free(chunk->free_word_size()); - } - chunk = chunk->next(); - } - if (block_freelists() != NULL) { - out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size()); } -} - -void SpaceManager::add_to_statistics(SpaceManagerStatistics* out) const { - MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag); - add_to_statistics_locked(out); -} - -#ifdef ASSERT -void SpaceManager::verify_metrics_locked() const { - assert_lock_strong(lock()); - - SpaceManagerStatistics stat; - add_to_statistics_locked(&stat); - - UsedChunksStatistics chunk_stats = stat.totals(); - DEBUG_ONLY(chunk_stats.check_sanity()); + assert(p != NULL || (p == NULL && did_hit_limit), "Sanity"); - assert_counter(_capacity_words, chunk_stats.cap(), "SpaceManager::_capacity_words"); - assert_counter(_used_words, chunk_stats.used(), "SpaceManager::_used_words"); - assert_counter(_overhead_words, chunk_stats.overhead(), "SpaceManager::_overhead_words"); -} + return p; -void SpaceManager::verify_metrics() const { - MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag); - verify_metrics_locked(); } -#endif // ASSERT } // namespace metaspace --- old/src/hotspot/share/memory/metaspace/spaceManager.hpp 2019-07-22 11:08:07.293710522 +0200 +++ new/src/hotspot/share/memory/metaspace/spaceManager.hpp 2019-07-22 11:08:07.077708210 +0200 @@ -28,206 +28,94 @@ #include "memory/allocation.hpp" #include "memory/metaspace.hpp" #include "memory/metaspace/blockFreelist.hpp" -#include "memory/metaspace/metaspaceCommon.hpp" +#include "memory/metaspace/chunkAllocSequence.hpp" +#include "memory/metaspace/chunkManager.hpp" #include "memory/metaspace/metachunk.hpp" -#include "memory/metaspace/metaspaceStatistics.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" +#include "memory/metaspace/metaspaceCommon.hpp" class outputStream; class Mutex; namespace metaspace { -// 
SpaceManager - used by Metaspace to handle allocations -class SpaceManager : public CHeapObj { - friend class ::ClassLoaderMetaspace; - friend class Metadebug; - private: +// The SpaceManager: +// - keeps a list of chunks-in-use by the class loader, as well as a current chunk used +// to allocate from +// - keeps a dictionary of free MetaBlocks. Those can be remnants of a retired chunk or +// allocations which were not needed anymore for some reason (e.g. releasing half-allocated +// structures when class loading fails) - // protects allocations +class SpaceManager : public CHeapObj { + + // Lock handed down from the associated ClassLoaderData. + // Protects allocations from this space. Mutex* const _lock; - // Type of metadata allocated. - const Metaspace::MetadataType _mdtype; + // The chunk manager to allocate chunks from. + ChunkManager* const _chunk_manager; - // Type of metaspace - const Metaspace::MetaspaceType _space_type; + // The chunk allocation strategy to use. + const ChunkAllocSequence* const _chunk_alloc_sequence; // List of chunks in use by this SpaceManager. Allocations // are done from the current chunk. The list is used for deallocating // chunks when the SpaceManager is freed. - Metachunk* _chunk_list; + MetachunkList _chunks; Metachunk* _current_chunk; - enum { - - // Maximum number of small chunks to allocate to a SpaceManager - small_chunk_limit = 4, + // Prematurely released metablocks. + BlockFreelist* _block_freelist; - // Maximum number of specialize chunks to allocate for anonymous and delegating - // metadata space to a SpaceManager - anon_and_delegating_metadata_specialize_chunk_limit = 4, - allocation_from_dictionary_limit = 4 * K + // Statistics - }; - - // Some running counters, but lets keep their number small to not add to much to - // the per-classloader footprint. + // Running counters. // Note: capacity = used + free + waste + overhead. We do not keep running counters for // free and waste. Their sum can be deduced from the three other values. - size_t _overhead_words; - size_t _capacity_words; - size_t _used_words; - uintx _num_chunks_by_type[NumberOfInUseLists]; - - // Free lists of blocks are per SpaceManager since they - // are assumed to be in chunks in use by the SpaceManager - // and all chunks in use by a SpaceManager are freed when - // the class loader using the SpaceManager is collected. 
- BlockFreelist* _block_freelists; - - private: - // Accessors - Metachunk* chunk_list() const { return _chunk_list; } - - BlockFreelist* block_freelists() const { return _block_freelists; } - - Metaspace::MetadataType mdtype() { return _mdtype; } + // size_t _overhead_words; + // size_t _capacity_words; + // size_t _used_words; + uintx _num_chunks_by_level[chklvl::NUM_CHUNK_LEVELS]; + uintx _num_chunks_total; + + + Mutex* lock() const { return _lock; } + ChunkManager* chunk_manager() const { return _chunk_manager; } + const ChunkAllocSequence* chunk_alloc_sequence() const { return _chunk_alloc_sequence; } - VirtualSpaceList* vs_list() const { return Metaspace::get_space_list(_mdtype); } - ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); } + BlockFreelist* block_freelist() const { return _block_freelist; } + void create_block_freelist(); - Metachunk* current_chunk() const { return _current_chunk; } - void set_current_chunk(Metachunk* v) { - _current_chunk = v; - } - - Metachunk* find_current_chunk(size_t word_size); - - // Add chunk to the list of chunks in use - void add_chunk(Metachunk* v, bool make_current); + // The current chunk is unable to service a request. The remainder of the chunk is + // chopped into blocks and fed into the _block_freelists, in the hope of later reuse. void retire_current_chunk(); - Mutex* lock() const { return _lock; } + // Given a requested word size, will allocate a chunk large enough to at least fit that + // size, but may be larger according to the rules in the ChunkAllocSequence. + // Updates counters and adds the chunk to the head of the chunk list. + Metachunk* allocate_chunk_to_fit(size_t requested_word_size); - // Adds to the given statistic object. Expects to be locked with lock(). - void add_to_statistics_locked(SpaceManagerStatistics* out) const; +public: - // Verify internal counters against the current state. Expects to be locked with lock(). - DEBUG_ONLY(void verify_metrics_locked() const;) + SpaceManager(ChunkManager* chunk_manager, const ChunkAllocSequence* alloc_sequence, Mutex* lock); - public: - SpaceManager(Metaspace::MetadataType mdtype, - Metaspace::MetaspaceType space_type, - Mutex* lock); ~SpaceManager(); - enum ChunkMultiples { - MediumChunkMultiple = 4 - }; - - static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; } - static size_t small_chunk_size(bool is_class) { return is_class ? ClassSmallChunk : SmallChunk; } - static size_t medium_chunk_size(bool is_class) { return is_class ? ClassMediumChunk : MediumChunk; } - - static size_t smallest_chunk_size(bool is_class) { return specialized_chunk_size(is_class); } - - // Accessors - bool is_class() const { return _mdtype == Metaspace::ClassType; } - - size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); } - size_t small_chunk_size() const { return small_chunk_size(is_class()); } - size_t medium_chunk_size() const { return medium_chunk_size(is_class()); } - - size_t smallest_chunk_size() const { return smallest_chunk_size(is_class()); } - - size_t medium_chunk_bunch() const { return medium_chunk_size() * MediumChunkMultiple; } - - bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); } - - size_t capacity_words() const { return _capacity_words; } - size_t used_words() const { return _used_words; } - size_t overhead_words() const { return _overhead_words; } - - // Adjust local, global counters after a new chunk has been added. 
- void account_for_new_chunk(const Metachunk* new_chunk); - - // Adjust local, global counters after space has been allocated from the current chunk. - void account_for_allocation(size_t words); - - // Adjust global counters just before the SpaceManager dies, after all its chunks - // have been returned to the freelist. - void account_for_spacemanager_death(); - - // Adjust the initial chunk size to match one of the fixed chunk list sizes, - // or return the unadjusted size if the requested size is humongous. - static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space); - size_t adjust_initial_chunk_size(size_t requested) const; - - // Get the initial chunks size for this metaspace type. - size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const; - - // Todo: remove this once we have counters by chunk type. - uintx num_chunks_by_type(ChunkIndex chunk_type) const { return _num_chunks_by_type[chunk_type]; } - - Metachunk* get_new_chunk(size_t chunk_word_size); - - // Block allocation and deallocation. - // Allocates a block from the current chunk + // Allocate memory from Metaspace. Will attempt to allocate from the _block_freelists, + // failing that, from the current chunk; failing that, attempt to get a new chunk from + // the associated ChunkManager. MetaWord* allocate(size_t word_size); - // Helper for allocations - MetaWord* allocate_work(size_t word_size); - - // Returns a block to the per manager freelist + // Prematurely returns a metaspace allocation to the _block_freelists because it is not + // needed anymore. void deallocate(MetaWord* p, size_t word_size); - // Based on the allocation size and a minimum chunk size, - // returned chunk size (for expanding space for chunk allocation). - size_t calc_chunk_size(size_t allocation_word_size); - - // Called when an allocation from the current chunk fails. - // Gets a new chunk (may require getting a new virtual space), - // and allocates from that chunk. - MetaWord* grow_and_allocate(size_t word_size); - - // Notify memory usage to MemoryService. - void track_metaspace_memory_usage(); - - // debugging support. - - void print_on(outputStream* st) const; - void locked_print_chunks_in_use_on(outputStream* st) const; - - void verify(); - void verify_chunk_size(Metachunk* chunk); - - // This adjusts the size given to be greater than the minimum allocation size in - // words for data in metaspace. Esentially the minimum size is currently 3 words. - size_t get_allocation_word_size(size_t word_size) { - size_t byte_size = word_size * BytesPerWord; - - size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock)); - raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment()); - - size_t raw_word_size = raw_bytes_size / BytesPerWord; - assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem"); - - return raw_word_size; - } - - // Adds to the given statistic object. - void add_to_statistics(SpaceManagerStatistics* out) const; - - // Verify internal counters against the current state. - DEBUG_ONLY(void verify_metrics() const;) + // Run verifications. slow=true: verify chunk-internal integrity too. 
+ DEBUG_ONLY(void locked_verify(bool slow) const;) }; - } // namespace metaspace #endif // SHARE_MEMORY_METASPACE_SPACEMANAGER_HPP --- old/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp 2019-07-22 11:08:07.789715832 +0200 +++ new/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp 2019-07-22 11:08:07.573713519 +0200 @@ -24,424 +24,138 @@ #include "precompiled.hpp" -#include "logging/log.hpp" -#include "logging/logStream.hpp" #include "memory/metaspace.hpp" #include "memory/metaspace/chunkManager.hpp" -#include "memory/metaspace/metachunk.hpp" -#include "memory/metaspace/metaspaceCommon.hpp" +#include "memory/metaspace/counter.hpp" +#include "memory/metaspace/commitLimiter.hpp" +#include "memory/metaspace/counter.hpp" #include "memory/metaspace/virtualSpaceList.hpp" #include "memory/metaspace/virtualSpaceNode.hpp" -#include "runtime/orderAccess.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/safepoint.hpp" + namespace metaspace { +// Create a new, empty, expandable list. +VirtualSpaceList::VirtualSpaceList(const char* name, CommitLimiter* commit_limiter) + : _first_node(NULL), + _current_node(NULL), + _can_expand(true), + _name(name), + _commit_limiter(commit_limiter), + _reserved_words_counter(), + _committed_words_counter() +{ + // Create the first node right now. Nothing gets committed yet though. + create_new_node(); +} + +// Create a new list. The list will contain one node only, which uses the given ReservedSpace. +// It will be not expandable beyond that first node. +VirtualSpaceList::VirtualSpaceList(const char* name, ReservedSpace rs, CommitLimiter* commit_limiter) +: _first_node(NULL), + _current_node(NULL), + _can_expand(false), + _name(name), + _commit_limiter(commit_limiter), + _reserved_words_counter(), + _committed_words_counter() +{ + // Create the first node spanning the existing ReservedSpace. This will be the only node created + // for this list since we cannot expand. + VirtualSpaceNode* vsn = VirtualSpaceNode::create_node(rs, _commit_limiter, + &_reserved_words_counter, &_committed_words_counter); + assert(vsn != NULL, "node creation failed"); + _first_node = _current_node = vsn; + _current_node->set_next(NULL); +} VirtualSpaceList::~VirtualSpaceList() { - VirtualSpaceListIterator iter(virtual_space_list()); - while (iter.repeat()) { - VirtualSpaceNode* vsl = iter.get_next(); - delete vsl; + // Note: normally, there is no reason ever to delete a vslist since they are + // global objects, but for gtests it makes sense to allow this. + VirtualSpaceNode* vsn = _first_node; + VirtualSpaceNode* vsn2 = vsn; + while (vsn != NULL) { + vsn2 = vsn->next(); + delete vsn; + vsn = vsn2; } } -void VirtualSpaceList::inc_reserved_words(size_t v) { - assert_lock_strong(MetaspaceExpand_lock); - _reserved_words = _reserved_words + v; -} -void VirtualSpaceList::dec_reserved_words(size_t v) { - assert_lock_strong(MetaspaceExpand_lock); - _reserved_words = _reserved_words - v; -} - -#define assert_committed_below_limit() \ - assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \ - "Too much committed memory. 
Committed: " SIZE_FORMAT \ - " limit (MaxMetaspaceSize): " SIZE_FORMAT, \ - MetaspaceUtils::committed_bytes(), MaxMetaspaceSize); - -void VirtualSpaceList::inc_committed_words(size_t v) { - assert_lock_strong(MetaspaceExpand_lock); - _committed_words = _committed_words + v; - - assert_committed_below_limit(); -} -void VirtualSpaceList::dec_committed_words(size_t v) { - assert_lock_strong(MetaspaceExpand_lock); - _committed_words = _committed_words - v; - - assert_committed_below_limit(); -} - -void VirtualSpaceList::inc_virtual_space_count() { - assert_lock_strong(MetaspaceExpand_lock); - _virtual_space_count++; -} +// Create a new node and append it to the list. After +// this function, _current_node shall point to a new empty node. +// List must be expandable for this to work. +void VirtualSpaceList::create_new_node() { + assert(_can_expand, "List is not expandable"); + VirtualSpaceNode* vsn = VirtualSpaceNode::create_node(constants::virtual_space_node_default_size, + _commit_limiter, + &_reserved_words_counter, &_committed_words_counter); + assert(vsn != NULL, "node creation failed"); + vsn->set_next(_first_node); + _first_node = _current_node = vsn; +} + +// Allocate a root chunk from this list. +// Note: this just returns a chunk whose memory is reserved; no memory is committed yet. +// Hence, before using this chunk, it must be committed. +// Also, no limits are checked, since no committing takes place. +Metachunk* VirtualSpaceList::allocate_root_chunk() { + + assert(_current_node != NULL, "Sanity"); + + Metachunk* c = _current_node->allocate_root_chunk(); + + if (c == NULL) { + + // The current node is fully used up. + + // Since all allocations from a VirtualSpaceNode happen in root-chunk-size units, + // we should never have som + assert(_current_node->used_words() == _current_node->word_size(), "Sanity"); -void VirtualSpaceList::dec_virtual_space_count() { - assert_lock_strong(MetaspaceExpand_lock); - _virtual_space_count--; -} - -// Walk the list of VirtualSpaceNodes and delete -// nodes with a 0 container_count. Remove Metachunks in -// the node from their respective freelists. -void VirtualSpaceList::purge(ChunkManager* chunk_manager) { - assert_lock_strong(MetaspaceExpand_lock); - // Don't use a VirtualSpaceListIterator because this - // list is being changed and a straightforward use of an iterator is not safe. - VirtualSpaceNode* prev_vsl = virtual_space_list(); - VirtualSpaceNode* next_vsl = prev_vsl; - int num_purged_nodes = 0; - while (next_vsl != NULL) { - VirtualSpaceNode* vsl = next_vsl; - DEBUG_ONLY(vsl->verify(false);) - next_vsl = vsl->next(); - // Don't free the current virtual space since it will likely - // be needed soon. - if (vsl->container_count() == 0 && vsl != current_virtual_space()) { - log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT - ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs()); - DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged)); - // Unlink it from the list - if (prev_vsl == vsl) { - // This is the case of the current node being the first node. 
-        assert(vsl == virtual_space_list(), "Expected to be the first node");
-        set_virtual_space_list(vsl->next());
-      } else {
-        prev_vsl->set_next(vsl->next());
-      }
-
-      vsl->purge(chunk_manager);
-      dec_reserved_words(vsl->reserved_words());
-      dec_committed_words(vsl->committed_words());
-      dec_virtual_space_count();
-      delete vsl;
-      num_purged_nodes ++;
+    if (_can_expand) {
+      create_new_node();
     } else {
-      prev_vsl = vsl;
+      return NULL; // We cannot expand this list.
     }
   }
 
-  // Verify list
-#ifdef ASSERT
-  if (num_purged_nodes > 0) {
-    verify(false);
-  }
-#endif
-}
-
+  c = _current_node->allocate_root_chunk();
 
-// This function looks at the mmap regions in the metaspace without locking.
-// The chunks are added with store ordering and not deleted except for at
-// unloading time during a safepoint.
-VirtualSpaceNode* VirtualSpaceList::find_enclosing_space(const void* ptr) {
-  // List should be stable enough to use an iterator here because removing virtual
-  // space nodes is only allowed at a safepoint.
-  if (is_within_envelope((address)ptr)) {
-    VirtualSpaceListIterator iter(virtual_space_list());
-    while (iter.repeat()) {
-      VirtualSpaceNode* vsn = iter.get_next();
-      if (vsn->contains(ptr)) {
-        return vsn;
-      }
-    }
-  }
-  return NULL;
-}
+  assert(c != NULL, "This should have worked");
 
-void VirtualSpaceList::retire_current_virtual_space() {
-  assert_lock_strong(MetaspaceExpand_lock);
+  return c;
 
-  VirtualSpaceNode* vsn = current_virtual_space();
-
-  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
-                                  Metaspace::chunk_manager_metadata();
-
-  vsn->retire(cm);
-}
-
-VirtualSpaceList::VirtualSpaceList(size_t word_size) :
-  _virtual_space_list(NULL),
-  _current_virtual_space(NULL),
-  _is_class(false),
-  _reserved_words(0),
-  _committed_words(0),
-  _virtual_space_count(0),
-  _envelope_lo((address)max_uintx),
-  _envelope_hi(NULL) {
-  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
-  create_new_virtual_space(word_size);
-}
-
-VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
-  _virtual_space_list(NULL),
-  _current_virtual_space(NULL),
-  _is_class(true),
-  _reserved_words(0),
-  _committed_words(0),
-  _virtual_space_count(0),
-  _envelope_lo((address)max_uintx),
-  _envelope_hi(NULL) {
-  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
-  VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
-  bool succeeded = class_entry->initialize();
-  if (succeeded) {
-    expand_envelope_to_include_node(class_entry);
-    // ensure lock-free iteration sees fully initialized node
-    OrderAccess::storestore();
-    link_vs(class_entry);
-  }
 }
 
-size_t VirtualSpaceList::free_bytes() {
-  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
-}
-
-// Allocate another meta virtual space and add it to the list.
-bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
-  assert_lock_strong(MetaspaceExpand_lock);
-
-  if (is_class()) {
-    assert(false, "We currently don't support more than one VirtualSpace for"
-           " the compressed class space. The initialization of the"
-           " CCS uses another code path and should not hit this path.");
-    return false;
-  }
-
-  if (vs_word_size == 0) {
-    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
-    return false;
-  }
-
-  // Reserve the space
-  size_t vs_byte_size = vs_word_size * BytesPerWord;
-  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
-
-  // Allocate the meta virtual space and initialize it. 
- VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size); - if (!new_entry->initialize()) { - delete new_entry; - return false; - } else { - assert(new_entry->reserved_words() == vs_word_size, - "Reserved memory size differs from requested memory size"); - expand_envelope_to_include_node(new_entry); - // ensure lock-free iteration sees fully initialized node - OrderAccess::storestore(); - link_vs(new_entry); - DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created)); - return true; - } - - DEBUG_ONLY(verify(false);) -} - -void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) { - if (virtual_space_list() == NULL) { - set_virtual_space_list(new_entry); - } else { - current_virtual_space()->set_next(new_entry); - } - set_current_virtual_space(new_entry); - inc_reserved_words(new_entry->reserved_words()); - inc_committed_words(new_entry->committed_words()); - inc_virtual_space_count(); #ifdef ASSERT - new_entry->mangle(); -#endif - LogTarget(Trace, gc, metaspace) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - VirtualSpaceNode* vsl = current_virtual_space(); - ResourceMark rm; - vsl->print_on(&ls); - } -} - -bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node, - size_t min_words, - size_t preferred_words) { - size_t before = node->committed_words(); - - bool result = node->expand_by(min_words, preferred_words); +void VirtualSpaceList::verify(bool slow) const { + assert(_current_node != NULL && _first_node != NULL && _name != NULL, "Sanity"); - size_t after = node->committed_words(); - - // after and before can be the same if the memory was pre-committed. - assert(after >= before, "Inconsistency"); - inc_committed_words(after - before); - - return result; -} - -bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) { - assert_is_aligned(min_words, Metaspace::commit_alignment_words()); - assert_is_aligned(preferred_words, Metaspace::commit_alignment_words()); - assert(min_words <= preferred_words, "Invalid arguments"); - - const char* const class_or_not = (is_class() ? "class" : "non-class"); - - if (!MetaspaceGC::can_expand(min_words, this->is_class())) { - log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.", - class_or_not); - return false; - } - - size_t allowed_expansion_words = MetaspaceGC::allowed_expansion(); - if (allowed_expansion_words < min_words) { - log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).", - class_or_not); - return false; + size_t total_reserved_words = 0; + size_t total_committed_words = 0; + const VirtualSpaceNode* vsn = _first_node; + while (vsn != NULL) { + vsn->verify(slow); + total_reserved_words += vsn->word_size(); + total_committed_words += vsn->committed_words(); + vsn = vsn->next(); } - size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words); + _reserved_words_counter.check(total_reserved_words); + _committed_words_counter.check(total_committed_words); - // Commit more memory from the the current virtual space. - bool vs_expanded = expand_node_by(current_virtual_space(), - min_words, - max_expansion_words); - if (vs_expanded) { - log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.", - class_or_not); - return true; - } - log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.", - class_or_not); - retire_current_virtual_space(); - - // Get another virtual space. 
- size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words); - grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words()); - - if (create_new_virtual_space(grow_vs_words)) { - if (current_virtual_space()->is_pre_committed()) { - // The memory was pre-committed, so we are done here. - assert(min_words <= current_virtual_space()->committed_words(), - "The new VirtualSpace was pre-committed, so it" - "should be large enough to fit the alloc request."); - return true; - } - - return expand_node_by(current_virtual_space(), - min_words, - max_expansion_words); - } - - return false; -} - -// Given a chunk, calculate the largest possible padding space which -// could be required when allocating it. -static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) { - const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class); - if (chunk_type != HumongousIndex) { - // Normal, non-humongous chunks are allocated at chunk size - // boundaries, so the largest padding space required would be that - // minus the smallest chunk size. - const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk; - return chunk_word_size - smallest_chunk_size; - } else { - // Humongous chunks are allocated at smallest-chunksize - // boundaries, so there is no padding required. - return 0; - } -} - - -Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) { - - // Allocate a chunk out of the current virtual space. - Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size); - - if (next != NULL) { - return next; - } - - // The expand amount is currently only determined by the requested sizes - // and not how much committed memory is left in the current virtual space. - - // We must have enough space for the requested size and any - // additional reqired padding chunks. - const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class()); - - size_t min_word_size = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words()); - size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words()); - if (min_word_size >= preferred_word_size) { - // Can happen when humongous chunks are allocated. - preferred_word_size = min_word_size; - } - - bool expanded = expand_by(min_word_size, preferred_word_size); - if (expanded) { - next = current_virtual_space()->get_chunk_vs(chunk_word_size); - assert(next != NULL, "The allocation was expected to succeed after the expansion"); - } - - return next; -} - -void VirtualSpaceList::print_on(outputStream* st, size_t scale) const { - st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT, - _virtual_space_count, p2i(_current_virtual_space)); - VirtualSpaceListIterator iter(virtual_space_list()); - while (iter.repeat()) { - st->cr(); - VirtualSpaceNode* node = iter.get_next(); - node->print_on(st, scale); - } -} - -void VirtualSpaceList::print_map(outputStream* st) const { - VirtualSpaceNode* list = virtual_space_list(); - VirtualSpaceListIterator iter(list); - unsigned i = 0; - while (iter.repeat()) { - st->print_cr("Node %u:", i); - VirtualSpaceNode* node = iter.get_next(); - node->print_map(st, this->is_class()); - i ++; - } -} - -// Given a node, expand range such that it includes the node. 
-void VirtualSpaceList::expand_envelope_to_include_node(const VirtualSpaceNode* node) {
- _envelope_lo = MIN2(_envelope_lo, (address)node->low_boundary());
- _envelope_hi = MAX2(_envelope_hi, (address)node->high_boundary());
 }
+#endif
+VirtualSpaceList* VirtualSpaceList::_vslist_class = NULL;
+VirtualSpaceList* VirtualSpaceList::_vslist_nonclass = NULL;
-#ifdef ASSERT
-void VirtualSpaceList::verify(bool slow) {
- VirtualSpaceNode* list = virtual_space_list();
- VirtualSpaceListIterator iter(list);
- size_t reserved = 0;
- size_t committed = 0;
- size_t node_count = 0;
- while (iter.repeat()) {
- VirtualSpaceNode* node = iter.get_next();
- if (slow) {
- node->verify(true);
- }
- // Check that the node resides fully within our envelope.
- assert((address)node->low_boundary() >= _envelope_lo && (address)node->high_boundary() <= _envelope_hi,
- "Node " SIZE_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT ") outside envelope [" PTR_FORMAT ", " PTR_FORMAT ").",
- node_count, p2i(node->low_boundary()), p2i(node->high_boundary()), p2i(_envelope_lo), p2i(_envelope_hi));
- reserved += node->reserved_words();
- committed += node->committed_words();
- node_count ++;
- }
- assert(reserved == reserved_words() && committed == committed_words() && node_count == _virtual_space_count,
- "Mismatch: reserved real: " SIZE_FORMAT " expected: " SIZE_FORMAT
- ", committed real: " SIZE_FORMAT " expected: " SIZE_FORMAT
- ", node count real: " SIZE_FORMAT " expected: " SIZE_FORMAT ".",
- reserved, reserved_words(), committed, committed_words(),
- node_count, _virtual_space_count);
+void VirtualSpaceList::initialize(VirtualSpaceList* vslist_class, VirtualSpaceList* vslist_nonclass) {
+ _vslist_class = vslist_class;
+ _vslist_nonclass = vslist_nonclass;
 }
-#endif // ASSERT
 } // namespace metaspace
--- old/src/hotspot/share/memory/metaspace/virtualSpaceList.hpp 2019-07-22 11:08:08.281721098 +0200
+++ new/src/hotspot/share/memory/metaspace/virtualSpaceList.hpp 2019-07-22 11:08:08.065718786 +0200
@@ -26,141 +26,93 @@
 #define SHARE_MEMORY_METASPACE_VIRTUALSPACELIST_HPP
 #include "memory/allocation.hpp"
+#include "memory/metaspace/counter.hpp"
+#include "memory/metaspace/commitLimiter.hpp"
 #include "memory/metaspace/virtualSpaceNode.hpp"
+#include "memory/virtualspace.hpp"
 #include "utilities/globalDefinitions.hpp"
 namespace metaspace {
 class Metachunk;
-class ChunkManager;
-// List of VirtualSpaces for metadata allocation.
-class VirtualSpaceList : public CHeapObj {
- friend class VirtualSpaceNode;
+class VirtualSpaceList : public CHeapObj {
- enum VirtualSpaceSizes {
- VirtualSpaceSize = 256 * K
- };
+ // Name
+ const char* const _name;
- // Head of the list
- VirtualSpaceNode* _virtual_space_list;
- // virtual space currently being used for allocations
- VirtualSpaceNode* _current_virtual_space;
+ // Head of the list.
+ VirtualSpaceNode* _first_node;
- // Is this VirtualSpaceList used for the compressed class space
- bool _is_class;
+ // Node currently being used for allocations.
+ VirtualSpaceNode* _current_node;
- // Sum of reserved and committed memory in the virtual spaces
- size_t _reserved_words;
- size_t _committed_words;
+ // Whether this list can expand by allocating new nodes.
+ const bool _can_expand;
- // Number of virtual spaces
- size_t _virtual_space_count;
+ // Used to check limits before committing memory.
+ CommitLimiter* const _commit_limiter;
- // Optimization: we keep an address range to quickly exclude pointers
- // which are clearly not pointing into metaspace.
This is an optimization for - // VirtualSpaceList::contains(). - address _envelope_lo; - address _envelope_hi; + // Statistics - bool is_within_envelope(address p) const { - return p >= _envelope_lo && p < _envelope_hi; - } + // Holds sum of reserved space, in words, over all list nodes. + SizeCounter _reserved_words_counter; - // Given a node, expand range such that it includes the node. - void expand_envelope_to_include_node(const VirtualSpaceNode* node); + // Holds sum of committed space, in words, over all list nodes. + SizeCounter _committed_words_counter; - ~VirtualSpaceList(); + // Create a new node and append it to the list. After + // this function, _current_node shall point to a new empty node. + // List must be expandable for this to work. + void create_new_node(); - VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; } +public: - void set_virtual_space_list(VirtualSpaceNode* v) { - _virtual_space_list = v; - } - void set_current_virtual_space(VirtualSpaceNode* v) { - _current_virtual_space = v; - } + // Create a new, empty, expandable list. + VirtualSpaceList(const char* name, CommitLimiter* commit_limiter); - void link_vs(VirtualSpaceNode* new_entry); + // Create a new list. The list will contain one node only, which uses the given ReservedSpace. + // It will be not expandable beyond that first node. + VirtualSpaceList(const char* name, ReservedSpace rs, CommitLimiter* commit_limiter); - // Get another virtual space and add it to the list. This - // is typically prompted by a failed attempt to allocate a chunk - // and is typically followed by the allocation of a chunk. - bool create_new_virtual_space(size_t vs_word_size); + virtual ~VirtualSpaceList(); - // Chunk up the unused committed space in the current - // virtual space and add the chunks to the free list. - void retire_current_virtual_space(); + // Allocate a root chunk from this list. + // Note: this just returns a chunk whose memory is reserved; no memory is committed yet. + // Hence, before using this chunk, it must be committed. + // Also, no limits are checked, since no committing takes place. + Metachunk* allocate_root_chunk(); - DEBUG_ONLY(bool contains_node(const VirtualSpaceNode* node) const;) + DEBUG_ONLY(void verify(bool slow) const;) - public: - VirtualSpaceList(size_t word_size); - VirtualSpaceList(ReservedSpace rs); + //// Statistics //// - size_t free_bytes(); + // Return sum of reserved words in all nodes. + size_t reserved_words() const { return _reserved_words_counter.get(); } - Metachunk* get_new_chunk(size_t chunk_word_size, - size_t suggested_commit_granularity); + // Return sum of committed words in all nodes. 
+ size_t committed_words() const { return _committed_words_counter.get(); } - bool expand_node_by(VirtualSpaceNode* node, - size_t min_words, - size_t preferred_words); + //// Debug stuff //// + DEBUG_ONLY(void verify(bool slow) const;) - bool expand_by(size_t min_words, - size_t preferred_words); - VirtualSpaceNode* current_virtual_space() { - return _current_virtual_space; - } - bool is_class() const { return _is_class; } - bool initialization_succeeded() { return _virtual_space_list != NULL; } +private: - size_t reserved_words() { return _reserved_words; } - size_t reserved_bytes() { return reserved_words() * BytesPerWord; } - size_t committed_words() { return _committed_words; } - size_t committed_bytes() { return committed_words() * BytesPerWord; } + static VirtualSpaceList* _vslist_class; + static VirtualSpaceList* _vslist_nonclass; - void inc_reserved_words(size_t v); - void dec_reserved_words(size_t v); - void inc_committed_words(size_t v); - void dec_committed_words(size_t v); - void inc_virtual_space_count(); - void dec_virtual_space_count(); +public: - VirtualSpaceNode* find_enclosing_space(const void* ptr); - bool contains(const void* ptr) { return find_enclosing_space(ptr) != NULL; } + static VirtualSpaceList* vslist_class() { return _vslist_class; } + static VirtualSpaceList* vslist_nonclass() { return _vslist_nonclass; } - // Unlink empty VirtualSpaceNodes and free it. - void purge(ChunkManager* chunk_manager); + static void initialize(VirtualSpaceList* vslist_class, VirtualSpaceList* vslist_nonclass); - void print_on(outputStream* st) const { print_on(st, K); } - void print_on(outputStream* st, size_t scale) const; - void print_map(outputStream* st) const; - DEBUG_ONLY(void verify(bool slow);) - - class VirtualSpaceListIterator : public StackObj { - VirtualSpaceNode* _virtual_spaces; - public: - VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) : - _virtual_spaces(virtual_spaces) {} - - bool repeat() { - return _virtual_spaces != NULL; - } - - VirtualSpaceNode* get_next() { - VirtualSpaceNode* result = _virtual_spaces; - if (_virtual_spaces != NULL) { - _virtual_spaces = _virtual_spaces->next(); - } - return result; - } - }; }; } // namespace metaspace --- old/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp 2019-07-22 11:08:08.773726364 +0200 +++ new/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp 2019-07-22 11:08:08.557724052 +0200 @@ -22,567 +22,323 @@ * */ + #include "precompiled.hpp" #include "logging/log.hpp" -#include "logging/logStream.hpp" + +#include "memory/metaspace/chunkLevel.hpp" +#include "memory/metaspace/chunkTree.hpp" +#include "memory/metaspace/commitLimiter.hpp" +#include "memory/metaspace/constants.hpp" +#include "memory/metaspace/counter.hpp" #include "memory/metaspace/metachunk.hpp" -#include "memory/metaspace.hpp" -#include "memory/metaspace/chunkManager.hpp" -#include "memory/metaspace/metaDebug.hpp" #include "memory/metaspace/metaspaceCommon.hpp" -#include "memory/metaspace/occupancyMap.hpp" +#include "memory/metaspace/runningCounters.hpp" #include "memory/metaspace/virtualSpaceNode.hpp" -#include "memory/virtualspace.hpp" + +#include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" -#include "services/memTracker.hpp" -#include "utilities/copy.hpp" + +#include "utilities/align.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" namespace metaspace { -// Decide if large pages should be committed when the memory is reserved. 
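// --------------------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: with this change the two lists (class and
// non-class space) become process-wide singletons that are wired up once during metaspace
// initialization and retrieved via vslist_class()/vslist_nonclass(). A standalone model of
// that setup; ListModel and the global names are assumptions for illustration only.
#include <cstddef>

struct ListModel { const char* name; };

static ListModel* g_list_class    = nullptr;   // models VirtualSpaceList::_vslist_class
static ListModel* g_list_nonclass = nullptr;   // models VirtualSpaceList::_vslist_nonclass

// Called once at startup, analogous to VirtualSpaceList::initialize().
inline void initialize_lists(ListModel* for_class_space, ListModel* for_nonclass_space) {
  g_list_class    = for_class_space;
  g_list_nonclass = for_nonclass_space;
}
// --------------------------------------------------------------------------------------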
-static bool should_commit_large_pages_when_reserving(size_t bytes) { - if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) { - size_t words = bytes / BytesPerWord; - bool is_class = false; // We never reserve large pages for the class space. - if (MetaspaceGC::can_expand(words, is_class) && - MetaspaceGC::allowed_expansion() >= words) { - return true; - } - } - - return false; +#ifdef ASSERT +template +void check_is_aligned_to_commit_granule(T x) { + assert(is_aligned(x, constants::commit_granule_bytes), "Unaligned pointer"); } +#endif -// byte_size is the size of the associated virtualspace. -VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) : - _next(NULL), _is_class(is_class), _rs(), _top(NULL), _container_count(0), _occupancy_map(NULL) { - assert_is_aligned(bytes, Metaspace::reserve_alignment()); - bool large_pages = should_commit_large_pages_when_reserving(bytes); - _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); - - if (_rs.is_reserved()) { - assert(_rs.base() != NULL, "Catch if we get a NULL address"); - assert(_rs.size() != 0, "Catch if we get a 0 size"); - assert_is_aligned(_rs.base(), Metaspace::reserve_alignment()); - assert_is_aligned(_rs.size(), Metaspace::reserve_alignment()); +// Given an address range, ensure it is committed. +// +// The range has to be aligned to granule size. +// +// Function will: +// - check how many granules in that region are uncommitted; If all are committed, it +// returns true immediately. +// - check if committing those uncommitted granules would bring us over the commit limit +// (GC threshold, MaxMetaspaceSize). If true, it returns false. +// - commit the memory. +// - mark the range as committed in the commit mask +// +// Returns true if success, false if it did hit a commit limit. +bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) { - MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); - } -} + DEBUG_ONLY(check_is_aligned_to_commit_granule(p);) + DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);) + assert_lock_strong(MetaspaceExpand_lock); -void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { - // When a node is purged, lets give it a thorough examination. - DEBUG_ONLY(verify(true);) - Metachunk* chunk = first_chunk(); - Metachunk* invalid_chunk = (Metachunk*) top(); - while (chunk < invalid_chunk ) { - assert(chunk->is_tagged_free(), "Should be tagged free"); - MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); - chunk_manager->remove_chunk(chunk); - chunk->remove_sentinel(); - assert(chunk->next() == NULL && - chunk->prev() == NULL, - "Was not removed from its list"); - chunk = (Metachunk*) next; - } -} + // First calculate how large the committed regions in this range are + const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size); + DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);) -void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const { + // By how much words we would increase commit charge + // were we to commit the given address range completely. + const size_t commit_increase_words = word_size - committed_words_in_range; - if (bottom() == top()) { - return; + if (commit_increase_words == 0) { + return true; // Already fully committed, nothing to do. } - const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk; - const size_t small_chunk_size = is_class ? 
ClassSmallChunk : SmallChunk; - const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk; - - int line_len = 100; - const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size); - line_len = (int)(section_len / spec_chunk_size); - - static const int NUM_LINES = 4; - - char* lines[NUM_LINES]; - for (int i = 0; i < NUM_LINES; i ++) { - lines[i] = (char*)os::malloc(line_len, mtInternal); + // Before committing any more memory, check limits. + if (_commit_limiter->possible_expansion_words() < commit_increase_words) { + return false; } - int pos = 0; - const MetaWord* p = bottom(); - const Metachunk* chunk = (const Metachunk*)p; - const MetaWord* chunk_end = p + chunk->word_size(); - while (p < top()) { - if (pos == line_len) { - pos = 0; - for (int i = 0; i < NUM_LINES; i ++) { - st->fill_to(22); - st->print_raw(lines[i], line_len); - st->cr(); - } - } - if (pos == 0) { - st->print(PTR_FORMAT ":", p2i(p)); - } - if (p == chunk_end) { - chunk = (Metachunk*)p; - chunk_end = p + chunk->word_size(); - } - // line 1: chunk starting points (a dot if that area is a chunk start). - lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' '; - - // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if - // chunk is in use. - const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free(); - if (chunk->word_size() == spec_chunk_size) { - lines[1][pos] = chunk_is_free ? 'x' : 'X'; - } else if (chunk->word_size() == small_chunk_size) { - lines[1][pos] = chunk_is_free ? 's' : 'S'; - } else if (chunk->word_size() == med_chunk_size) { - lines[1][pos] = chunk_is_free ? 'm' : 'M'; - } else if (chunk->word_size() > med_chunk_size) { - lines[1][pos] = chunk_is_free ? 'h' : 'H'; - } else { - ShouldNotReachHere(); - } - - // Line 3: chunk origin - const ChunkOrigin origin = chunk->get_origin(); - lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin; - - // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting, - // but were never used. - lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v'; - p += spec_chunk_size; - pos ++; - } - if (pos > 0) { - for (int i = 0; i < NUM_LINES; i ++) { - st->fill_to(22); - st->print_raw(lines[i], line_len); - st->cr(); - } - } - for (int i = 0; i < NUM_LINES; i ++) { - os::free(lines[i]); + // Commit... + if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) { + vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace."); } -} + log_debug(gc, metaspace)("Increased metaspace by " SIZE_FORMAT " bytes.", + commit_increase_words * BytesPerWord); -#ifdef ASSERT + // ... tell commit limiter... + _commit_limiter->increase_committed(commit_increase_words); -// Verify counters, all chunks in this list node and the occupancy map. -void VirtualSpaceNode::verify(bool slow) { - log_trace(gc, metaspace, freelist)("verifying %s virtual space node (%s).", - (is_class() ? "class space" : "metaspace"), (slow ? "slow" : "quick")); - // Fast mode: just verify chunk counters and basic geometry - // Slow mode: verify chunks and occupancy map - uintx num_in_use_chunks = 0; - Metachunk* chunk = first_chunk(); - Metachunk* invalid_chunk = (Metachunk*) top(); - - // Iterate the chunks in this node and verify each chunk. - while (chunk < invalid_chunk ) { - if (slow) { - do_verify_chunk(chunk); - } - if (!chunk->is_tagged_free()) { - num_in_use_chunks ++; - } - const size_t s = chunk->word_size(); - // Prevent endless loop on invalid chunk size. 
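// --------------------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: commit_range() above only charges the commit
// limiter for the granules that are not yet committed, as recorded in the commit mask.
// A standalone model of that bookkeeping with a simple flag-per-granule vector; the names
// CommitMaskModel/can_commit are assumptions, the real code uses CommitMask and
// CommitLimiter::possible_expansion_words().
#include <cstddef>
#include <vector>

struct CommitMaskModel {
  size_t granule_words;           // words covered by one commit granule (64K bytes in the patch)
  std::vector<bool> committed;    // one flag per granule

  // Words already committed inside [first_granule, first_granule + num_granules).
  size_t committed_words_in_range(size_t first_granule, size_t num_granules) const {
    size_t words = 0;
    for (size_t i = first_granule; i < first_granule + num_granules; i++) {
      if (committed[i]) words += granule_words;
    }
    return words;
  }
};

// Mirrors the limiter check in commit_range(): only the still-uncommitted part counts
// against the remaining expansion budget.
inline bool can_commit(const CommitMaskModel& mask, size_t first_granule, size_t num_granules,
                       size_t possible_expansion_words) {
  const size_t range_words = num_granules * mask.granule_words;
  const size_t increase    = range_words - mask.committed_words_in_range(first_granule, num_granules);
  return increase == 0 || increase <= possible_expansion_words;
}
// --------------------------------------------------------------------------------------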
- assert(is_valid_chunksize(is_class(), s), "Invalid chunk size: " SIZE_FORMAT ".", s); - MetaWord* next = ((MetaWord*)chunk) + s; - chunk = (Metachunk*) next; - } - assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT - ", counter: " UINTX_FORMAT ".", num_in_use_chunks, _container_count); - // Also verify the occupancy map. - if (slow) { - occupancy_map()->verify(bottom(), top()); - } -} + // ... update counters in containing vslist ... + _total_committed_words_counter->increment_by(commit_increase_words); -// Verify that all free chunks in this node are ideally merged -// (there not should be multiple small chunks where a large chunk could exist.) -void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() { - Metachunk* chunk = first_chunk(); - Metachunk* invalid_chunk = (Metachunk*) top(); - // Shorthands. - const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord; - const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord; - int num_free_chunks_since_last_med_boundary = -1; - int num_free_chunks_since_last_small_boundary = -1; - bool error = false; - char err[256]; - while (!error && chunk < invalid_chunk ) { - // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary. - // Reset the counter when encountering a non-free chunk. - if (chunk->get_chunk_type() != HumongousIndex) { - if (chunk->is_tagged_free()) { - // Count successive free, non-humongous chunks. - if (is_aligned(chunk, size_small)) { - if (num_free_chunks_since_last_small_boundary > 0) { - error = true; - jio_snprintf(err, sizeof(err), "Missed chunk merge opportunity to merge a small chunk preceding " PTR_FORMAT ".", p2i(chunk)); - } else { - num_free_chunks_since_last_small_boundary = 0; - } - } else if (num_free_chunks_since_last_small_boundary != -1) { - num_free_chunks_since_last_small_boundary ++; - } - if (is_aligned(chunk, size_med)) { - if (num_free_chunks_since_last_med_boundary > 0) { - error = true; - jio_snprintf(err, sizeof(err), "Missed chunk merge opportunity to merge a medium chunk preceding " PTR_FORMAT ".", p2i(chunk)); - } else { - num_free_chunks_since_last_med_boundary = 0; - } - } else if (num_free_chunks_since_last_med_boundary != -1) { - num_free_chunks_since_last_med_boundary ++; - } - } else { - // Encountering a non-free chunk, reset counters. - num_free_chunks_since_last_med_boundary = -1; - num_free_chunks_since_last_small_boundary = -1; - } - } else { - // One cannot merge areas with a humongous chunk in the middle. Reset counters. - num_free_chunks_since_last_med_boundary = -1; - num_free_chunks_since_last_small_boundary = -1; - } - - if (error) { - print_map(tty, is_class()); - fatal("%s", err); - } + // ... and update the commit mask. + _commit_mask.mark_range_as_committed(p, word_size); - MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); - chunk = (Metachunk*) next; +#ifdef ASSERT + // The commit boundary maintained in the CommitLimiter should be equal the sum of committed words + // in both class and non-class vslist (outside gtests). + if (_commit_limiter == CommitLimiter::globalLimiter()) { + assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch"); } +#endif + + return true; + } -#endif // ASSERT -void VirtualSpaceNode::inc_container_count() { +// Given an address range, ensure it is committed. +// +// The range does not have to be aligned to granule size. 
However, the function will always commit +// whole granules. +// +// Function will: +// - check how many granules in that region are uncommitted; If all are committed, it +// returns true immediately. +// - check if committing those uncommitted granules would bring us over the commit limit +// (GC threshold, MaxMetaspaceSize). If true, it returns false. +// - commit the memory. +// - mark the range as committed in the commit mask +// +// Returns true if success, false if it did hit a commit limit. +bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) { + assert_lock_strong(MetaspaceExpand_lock); - _container_count++; + assert(p != NULL && word_size > 0, "Sanity"); + + MetaWord* p_start = align_down(p, constants::commit_granule_bytes); + MetaWord* p_end = align_up(p + word_size, constants::commit_granule_bytes); + + // Todo: simple for now. Make it more intelligent late + return commit_range(p_start, p_end - p_start); + } -void VirtualSpaceNode::dec_container_count() { +// Given an address range (which has to be aligned to commit granule size): +// - uncommit it +// - mark it as uncommitted in the commit mask +bool VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) { + + DEBUG_ONLY(check_is_aligned_to_commit_granule(p);) + DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);) assert_lock_strong(MetaspaceExpand_lock); - _container_count--; -} -VirtualSpaceNode::~VirtualSpaceNode() { - _rs.release(); - if (_occupancy_map != NULL) { - delete _occupancy_map; + // First calculate how large the committed regions in this range are + const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size); + DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);) + + if (committed_words_in_range == 0) { + return true; // Already fully uncommitted, nothing to do. } + + // Uncommit... + if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) { + // Note: this can actually happen, since uncommit may increase the number of mappings. + fatal("Failed to uncommit metaspace."); + } + + log_debug(gc, metaspace)("Decreased metaspace by " SIZE_FORMAT " bytes.", + committed_words_in_range * BytesPerWord); + + // ... tell commit limiter... + _commit_limiter->decrease_committed(committed_words_in_range); + + // ... and global counters... + _total_committed_words_counter->decrement_by(committed_words_in_range); + + // ... and update the commit mask. + _commit_mask.mark_range_as_uncommitted(p, word_size); + #ifdef ASSERT - size_t word_size = sizeof(*this) / BytesPerWord; - Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1); + // The commit boundary maintained in the CommitLimiter should be equal the sum of committed words + // in both class and non-class vslist (outside gtests). 
+ if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario + assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch"); + } #endif + + return true; + } -size_t VirtualSpaceNode::used_words_in_vs() const { - return pointer_delta(top(), bottom(), sizeof(MetaWord)); +//// creation, destruction //// + +VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs, + CommitLimiter* limiter, + SizeCounter* reserve_counter, + SizeCounter* commit_counter) + : _next(NULL), + _base(rs.base()), + _word_size(rs.size() / BytesPerWord), + _used_words(0), + _commit_mask(rs.base(), rs.size() / BytesPerWord), + _chunk_tree_array(rs.base(), rs.size() / BytesPerWord), + _commit_limiter(limiter), + _total_reserved_words_counter(reserve_counter), + _total_committed_words_counter(commit_counter) +{ + // Update reserved counter in vslist + _total_reserved_words_counter->increment_by(_word_size); } -// Space committed in the VirtualSpace -size_t VirtualSpaceNode::capacity_words_in_vs() const { - return pointer_delta(end(), bottom(), sizeof(MetaWord)); -} - -size_t VirtualSpaceNode::free_words_in_vs() const { - return pointer_delta(end(), top(), sizeof(MetaWord)); -} - -// Given an address larger than top(), allocate padding chunks until top is at the given address. -void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) { - - assert(target_top > top(), "Sanity"); - - // Padding chunks are added to the freelist. - ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(is_class()); - - // shorthands - const size_t spec_word_size = chunk_manager->specialized_chunk_word_size(); - const size_t small_word_size = chunk_manager->small_chunk_word_size(); - const size_t med_word_size = chunk_manager->medium_chunk_word_size(); - - while (top() < target_top) { - - // We could make this coding more generic, but right now we only deal with two possible chunk sizes - // for padding chunks, so it is not worth it. - size_t padding_chunk_word_size = small_word_size; - if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) { - assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true. - padding_chunk_word_size = spec_word_size; - } - MetaWord* here = top(); - assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord)); - inc_top(padding_chunk_word_size); - - // Create new padding chunk. - ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class()); - assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity"); - - Metachunk* const padding_chunk = - ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this); - assert(padding_chunk == (Metachunk*)here, "Sanity"); - DEBUG_ONLY(padding_chunk->set_origin(origin_pad);) - log_trace(gc, metaspace, freelist)("Created padding chunk in %s at " - PTR_FORMAT ", size " SIZE_FORMAT_HEX ".", - (is_class() ? "class space " : "metaspace"), - p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord)); - - // Mark chunk start in occupancy map. - occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true); - - // Chunks are born as in-use (see MetaChunk ctor). So, before returning - // the padding chunk to its chunk manager, mark it as in use (ChunkManager - // will assert that). - do_update_in_use_info_for_chunk(padding_chunk, true); - - // Return Chunk to freelist. 
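// --------------------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: ensure_range_is_committed() above widens an
// arbitrary [p, p + word_size) request to whole commit granules before calling
// commit_range(). The same alignment arithmetic on plain numbers; the 64K granule size
// matches the comment in virtualSpaceNode.hpp, the function names are assumptions.
#include <cstdint>

constexpr uint64_t kGranuleBytes = 64 * 1024;

constexpr uint64_t align_down_to_granule(uint64_t addr) { return addr - (addr % kGranuleBytes); }
constexpr uint64_t align_up_to_granule(uint64_t addr)   { return align_down_to_granule(addr + kGranuleBytes - 1); }

// A small range straddling a granule boundary still commits both granules in full.
static_assert(align_down_to_granule(kGranuleBytes - 10) == 0, "start rounds down");
static_assert(align_up_to_granule(kGranuleBytes + 90) == 2 * kGranuleBytes, "end rounds up");
// --------------------------------------------------------------------------------------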
- inc_container_count(); - chunk_manager->return_single_chunk(padding_chunk); - // Please note: at this point, ChunkManager::return_single_chunk() - // may already have merged the padding chunk with neighboring chunks, so - // it may have vanished at this point. Do not reference the padding - // chunk beyond this point. - } +// Create a node of a given size +VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size, + CommitLimiter* limiter, + SizeCounter* reserve_counter, + SizeCounter* commit_counter) +{ - assert(top() == target_top, "Sanity"); + DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);) -} // allocate_padding_chunks_until_top_is_at() + ReservedSpace rs(word_size * BytesPerWord, + constants::commit_granule_bytes, + false, // TODO deal with large pages + false); -// Allocates the chunk from the virtual space only. -// This interface is also used internally for debugging. Not all -// chunks removed here are necessarily used for allocation. -Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) { - // Non-humongous chunks are to be allocated aligned to their chunk - // size. So, start addresses of medium chunks are aligned to medium - // chunk size, those of small chunks to small chunk size and so - // forth. This facilitates merging of free chunks and reduces - // fragmentation. Chunk sizes are spec < small < medium, with each - // larger chunk size being a multiple of the next smaller chunk - // size. - // Because of this alignment, me may need to create a number of padding - // chunks. These chunks are created and added to the freelist. - - // The chunk manager to which we will give our padding chunks. - ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(is_class()); - - // shorthands - const size_t spec_word_size = chunk_manager->specialized_chunk_word_size(); - const size_t small_word_size = chunk_manager->small_chunk_word_size(); - const size_t med_word_size = chunk_manager->medium_chunk_word_size(); - - assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size || - chunk_word_size >= med_word_size, "Invalid chunk size requested."); - - // Chunk alignment (in bytes) == chunk size unless humongous. - // Humongous chunks are aligned to the smallest chunk size (spec). - const size_t required_chunk_alignment = (chunk_word_size > med_word_size ? - spec_word_size : chunk_word_size) * sizeof(MetaWord); - - // Do we have enough space to create the requested chunk plus - // any padding chunks needed? - MetaWord* const next_aligned = - static_cast(align_up(top(), required_chunk_alignment)); - if (!is_available((next_aligned - top()) + chunk_word_size)) { - return NULL; + if (!rs.is_reserved()) { + vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace"); } - // Before allocating the requested chunk, allocate padding chunks if necessary. - // We only need to do this for small or medium chunks: specialized chunks are the - // smallest size, hence always aligned. Homungous chunks are allocated unaligned - // (implicitly, also aligned to smallest chunk size). - if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top()) { - log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...", - (is_class() ? "class space " : "metaspace"), - top(), next_aligned); - allocate_padding_chunks_until_top_is_at(next_aligned); - // Now, top should be aligned correctly. 
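// --------------------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: create_node() reserves one large, granule-
// aligned address range, and allocate_root_chunk() (below) then hands out fixed-size root
// chunks from it by simple bump allocation; memory is only reserved at that point and is
// committed later on demand. Standalone model; the 4M root chunk size is an assumption,
// the real value is chklvl::MAX_CHUNK_WORD_SIZE.
#include <cstddef>

constexpr size_t kRootChunkWords = (4 * 1024 * 1024) / sizeof(void*);

struct VSNodeModel {
  size_t word_size;    // total words reserved in this node
  size_t used_words;   // words already handed out as root chunks

  // Returns the word offset of a new root chunk, or size_t(-1) if the node is exhausted.
  size_t allocate_root_chunk_offset() {
    if (word_size - used_words < kRootChunkWords) {
      return size_t(-1);
    }
    const size_t offset = used_words;
    used_words += kRootChunkWords;
    return offset;
  }
};
// --------------------------------------------------------------------------------------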
- assert_is_aligned(top(), required_chunk_alignment); - } + reserve_counter->increment_by(word_size * BytesPerWord); - // Now, top should be aligned correctly. - assert_is_aligned(top(), required_chunk_alignment); + return create_node(rs, limiter, reserve_counter, commit_counter); - // Bottom of the new chunk - MetaWord* chunk_limit = top(); - assert(chunk_limit != NULL, "Not safe to call this method"); - - // The virtual spaces are always expanded by the - // commit granularity to enforce the following condition. - // Without this the is_available check will not work correctly. - assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(), - "The committed memory doesn't match the expanded memory."); - - if (!is_available(chunk_word_size)) { - LogTarget(Trace, gc, metaspace, freelist) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size); - // Dump some information about the virtual space that is nearly full - print_on(&ls); - } - return NULL; - } +} - // Take the space (bump top on the current virtual space). - inc_top(chunk_word_size); +// Create a node over an existing space +VirtualSpaceNode* VirtualSpaceNode::create_node(ReservedSpace rs, + CommitLimiter* limiter, + SizeCounter* reserve_counter, + SizeCounter* commit_counter) +{ + reserve_counter->increment_by(rs.size() * BytesPerWord); + return new VirtualSpaceNode(rs, limiter, reserve_counter, commit_counter); +} - // Initialize the chunk - ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class()); - Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this); - assert(result == (Metachunk*)chunk_limit, "Sanity"); - occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true); - do_update_in_use_info_for_chunk(result, true); +VirtualSpaceNode::~VirtualSpaceNode() { + _rs.release(); - inc_container_count(); -#ifdef ASSERT - EVERY_NTH(VerifyMetaspaceInterval) - chunk_manager->locked_verify(true); - verify(true); - END_EVERY_NTH - do_verify_chunk(result); -#endif - - result->inc_use_count(); + // Update counters in vslist + _total_committed_words_counter->decrement_by(committed_words()); + _total_reserved_words_counter->decrement_by(_word_size); - return result; } -// Expand the virtual space (commit more of the reserved space) -bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) { - size_t min_bytes = min_words * BytesPerWord; - size_t preferred_bytes = preferred_words * BytesPerWord; - size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size(); +//// Chunk allocation, splitting, merging ///// - if (uncommitted < min_bytes) { - return false; - } +// Allocate a root chunk from this node. Will fail and return NULL +// if the node is full. +// Note: this just returns a chunk whose memory is reserved; no memory is committed yet. +// Hence, before using this chunk, it must be committed. +// Also, no limits are checked, since no committing takes place. +Metachunk* VirtualSpaceNode::allocate_root_chunk() { + + assert_lock_strong(MetaspaceExpand_lock); + + assert_is_aligned(free_words, chklvl::MAX_CHUNK_WORD_SIZE); + + if (free_words() >= chklvl::MAX_CHUNK_WORD_SIZE) { + + MetaWord* loc = _base + _used_words; + _used_words += chklvl::MAX_CHUNK_WORD_SIZE; + + // Create a new chunk tree for that new root node. 
+ ChunkTree* tree = _chunk_tree_array.get_tree_by_address(loc); - size_t commit = MIN2(preferred_bytes, uncommitted); - bool result = virtual_space()->expand_by(commit, false); + // Create a root chunk header and initialize it; + Metachunk* c = tree->alloc_root_chunk_header(); + + // Wire it to the memory. + c->set_base(loc); + + DEBUG_ONLY(c->verify(true);) + return c; - if (result) { - log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.", - (is_class() ? "class" : "non-class"), commit); - DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded)); - } else { - log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.", - (is_class() ? "class" : "non-class"), commit); } - assert(result, "Failed to commit memory"); + return NULL; // Node is full. - return result; } -Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) { +Metachunk* VirtualSpaceNode::split(chklvl_t target_level, Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]) { + assert_lock_strong(MetaspaceExpand_lock); - Metachunk* result = take_from_committed(chunk_word_size); - return result; -} -bool VirtualSpaceNode::initialize() { + // Get the tree associated with this chunk and let it handle the splitting + ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base()); + return tree->split(target_level, c, splinters); - if (!_rs.is_reserved()) { - return false; - } +} - // These are necessary restriction to make sure that the virtual space always - // grows in steps of Metaspace::commit_alignment(). If both base and size are - // aligned only the middle alignment of the VirtualSpace is used. - assert_is_aligned(_rs.base(), Metaspace::commit_alignment()); - assert_is_aligned(_rs.size(), Metaspace::commit_alignment()); - - // ReservedSpaces marked as special will have the entire memory - // pre-committed. Setting a committed size will make sure that - // committed_size and actual_committed_size agrees. - size_t pre_committed_size = _rs.special() ? _rs.size() : 0; - - bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size, - Metaspace::commit_alignment()); - if (result) { - assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(), - "Checking that the pre-committed memory was registered by the VirtualSpace"); +Metachunk* VirtualSpaceNode::merge(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]) { - set_top((MetaWord*)virtual_space()->low()); - } + assert_lock_strong(MetaspaceExpand_lock); + + // Get the tree associated with this chunk and let it handle the merging + ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base()); + return tree->merge(c, num_merged); - // Initialize Occupancy Map. - const size_t smallest_chunk_size = is_class() ? 
ClassSpecializedChunk : SpecializedChunk; - _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size); - - return result; -} - -void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const { - size_t used_words = used_words_in_vs(); - size_t commit_words = committed_words(); - size_t res_words = reserved_words(); - VirtualSpace* vs = virtual_space(); - - st->print("node @" PTR_FORMAT ": ", p2i(this)); - st->print("reserved="); - print_scaled_words(st, res_words, scale); - st->print(", committed="); - print_scaled_words_and_percentage(st, commit_words, res_words, scale); - st->print(", used="); - print_scaled_words_and_percentage(st, used_words, res_words, scale); - st->cr(); - st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", " - PTR_FORMAT ", " PTR_FORMAT ")", - p2i(bottom()), p2i(top()), p2i(end()), - p2i(vs->high_boundary())); } #ifdef ASSERT -void VirtualSpaceNode::mangle() { - size_t word_size = capacity_words_in_vs(); - Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1); +// Verify counters and basic structure. Slow mode: verify all chunks in depth +void VirtualSpaceNode::verify(bool slow) const { + + assert_lock_strong(MetaspaceExpand_lock); + + assert(base() != NULL, "Invalid base"); + assert_is_aligned(base(), chklvl::MAX_CHUNK_BYTE_SIZE); + assert(used_words() < word_size(), "Sanity"); + + // Since we only ever hand out root chunks from a vsnode, top should always be aligned + // to root chunk size. + assert_is_aligned(used_words(), chklvl::MAX_CHUNK_WORD_SIZE); + + _commit_mask.verify(slow); + _chunk_tree_array.verify(slow); + } -#endif // ASSERT -void VirtualSpaceNode::retire(ChunkManager* chunk_manager) { - assert(is_class() == chunk_manager->is_class(), "Wrong ChunkManager?"); -#ifdef ASSERT - verify(false); - EVERY_NTH(VerifyMetaspaceInterval) - verify(true); - END_EVERY_NTH -#endif - for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) { - ChunkIndex index = (ChunkIndex)i; - size_t chunk_size = chunk_manager->size_by_index(index); - - while (free_words_in_vs() >= chunk_size) { - Metachunk* chunk = get_chunk_vs(chunk_size); - // Chunk will be allocated aligned, so allocation may require - // additional padding chunks. That may cause above allocation to - // fail. Just ignore the failed allocation and continue with the - // next smaller chunk size. As the VirtualSpaceNode comitted - // size should be a multiple of the smallest chunk size, we - // should always be able to fill the VirtualSpace completely. - if (chunk == NULL) { - break; - } - chunk_manager->return_single_chunk(chunk); - } - } - assert(free_words_in_vs() == 0, "should be empty now"); +// Returns sum of committed space, in words. 
+size_t VirtualSpaceNode::committed_words() const { + return _commit_mask.get_committed_size(); } +#endif + } // namespace metaspace --- old/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp 2019-07-22 11:08:09.277731758 +0200 +++ new/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp 2019-07-22 11:08:09.065729489 +0200 @@ -25,139 +25,207 @@ #ifndef SHARE_MEMORY_METASPACE_VIRTUALSPACENODE_HPP #define SHARE_MEMORY_METASPACE_VIRTUALSPACENODE_HPP +#include "memory/metaspace/constants.hpp" +#include "memory/metaspace/counter.hpp" +#include "memory/metaspace/chunkTree.hpp" +#include "memory/metaspace/commitMask.hpp" #include "memory/virtualspace.hpp" #include "memory/memRegion.hpp" #include "utilities/debug.hpp" +#include "utilities/bitMap.hpp" #include "utilities/globalDefinitions.hpp" + class outputStream; namespace metaspace { -class Metachunk; -class ChunkManager; -class OccupancyMap; +class CommitLimiter; -// A VirtualSpaceList node. +// VirtualSpaceNode manage a single address range of the Metaspace. +// +// That address range may contain interleaved committed and uncommitted +// regions. It keeps track of which regions have committed and offers +// functions to commit and uncommit regions. +// +// It allocates and hands out memory ranges, starting at the bottom. +// +// Address range must be aligned to root chunk size. +// class VirtualSpaceNode : public CHeapObj { - friend class VirtualSpaceList; // Link to next VirtualSpaceNode VirtualSpaceNode* _next; - // Whether this node is contained in class or metaspace. - const bool _is_class; - - // total in the VirtualSpace ReservedSpace _rs; - VirtualSpace _virtual_space; - MetaWord* _top; - // count of chunks contained in this VirtualSpace - uintx _container_count; - - OccupancyMap* _occupancy_map; - - // Convenience functions to access the _virtual_space - char* low() const { return virtual_space()->low(); } - char* high() const { return virtual_space()->high(); } - char* low_boundary() const { return virtual_space()->low_boundary(); } - char* high_boundary() const { return virtual_space()->high_boundary(); } - - // The first Metachunk will be allocated at the bottom of the - // VirtualSpace - Metachunk* first_chunk() { return (Metachunk*) bottom(); } - - // Committed but unused space in the virtual space - size_t free_words_in_vs() const; - - // True if this node belongs to class metaspace. - bool is_class() const { return _is_class; } - - // Helper function for take_from_committed: allocate padding chunks - // until top is at the given address. 
- void allocate_padding_chunks_until_top_is_at(MetaWord* target_top); - - public: - - VirtualSpaceNode(bool is_class, size_t byte_size); - VirtualSpaceNode(bool is_class, ReservedSpace rs) : - _next(NULL), _is_class(is_class), _rs(rs), _top(NULL), _container_count(0), _occupancy_map(NULL) {} - ~VirtualSpaceNode(); - - // Convenience functions for logical bottom and end - MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); } - MetaWord* end() const { return (MetaWord*) _virtual_space.high(); } - - const OccupancyMap* occupancy_map() const { return _occupancy_map; } - OccupancyMap* occupancy_map() { return _occupancy_map; } - - bool contains(const void* ptr) { return ptr >= low() && ptr < high(); } - - size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; } - size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; } - - bool is_pre_committed() const { return _virtual_space.special(); } - - // address of next available space in _virtual_space; - // Accessors - VirtualSpaceNode* next() { return _next; } - void set_next(VirtualSpaceNode* v) { _next = v; } - - void set_top(MetaWord* v) { _top = v; } - // Accessors - VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; } + // Start pointer of the area. + MetaWord* const _base; - // Returns true if "word_size" is available in the VirtualSpace - bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); } + // Size, in words, of the whole node + const size_t _word_size; - MetaWord* top() const { return _top; } - void inc_top(size_t word_size) { _top += word_size; } + // Size, in words, of the range of this node which has been handed out in + // the form of chunks. + size_t _used_words; + + // The bitmap describing the commit state of the region: + // Each bit covers a region of 64K (see constants::commit_granule_size). + CommitMask _commit_mask; + + // An array of chunk trees. Each one describes fragmentation inside the associated root chunk. + ChunkTreeArray _chunk_tree_array; + + // Limiter object to ask before expanding the committed size of this node. + CommitLimiter* const _commit_limiter; + + // Points to outside size counters which we are to increase/decrease when we commit/uncommit + // space from this node. + SizeCounter* const _total_reserved_words_counter; + SizeCounter* const _total_committed_words_counter; + + /// committing, uncommitting /// + + // Given a pointer into this node, calculate the start of the commit granule + // the pointer points into. + MetaWord* calc_start_of_granule(MetaWord* p) const { + DEBUG_ONLY(check_pointer(p)); + return align_down(p, constants::commit_granule_bytes); + } + + // Given an address range, ensure it is committed. + // + // The range has to be aligned to granule size. + // + // Function will: + // - check how many granules in that region are uncommitted; If all are committed, it + // returns true immediately. + // - check if committing those uncommitted granules would bring us over the commit limit + // (GC threshold, MaxMetaspaceSize). If true, it returns false. + // - commit the memory. + // - mark the range as committed in the commit mask + // + // Returns true if success, false if it did hit a commit limit. + bool commit_range(MetaWord* p, size_t word_size); + + //// creation //// + + // Create a new empty node spanning the given reserved space. 
+ VirtualSpaceNode(ReservedSpace rs,
+ CommitLimiter* limiter,
+ SizeCounter* reserve_counter,
+ SizeCounter* commit_counter);
+
+ MetaWord* base() const { return _base; }
+ size_t word_size() const { return _word_size; }
+
+public:
+
+ // Create a node of a given size
+ static VirtualSpaceNode* create_node(size_t word_size,
+ CommitLimiter* limiter,
+ SizeCounter* reserve_counter,
+ SizeCounter* commit_counter);
+
+ // Create a node over an existing space
+ static VirtualSpaceNode* create_node(ReservedSpace rs,
+ CommitLimiter* limiter,
+ SizeCounter* reserve_counter,
+ SizeCounter* commit_counter);
- uintx container_count() { return _container_count; }
- void inc_container_count();
- void dec_container_count();
-
- // used and capacity in this single entry in the list
- size_t used_words_in_vs() const;
- size_t capacity_words_in_vs() const;
-
- bool initialize();
-
- // get space from the virtual space
- Metachunk* take_from_committed(size_t chunk_word_size);
-
- // Allocate a chunk from the virtual space and return it.
- Metachunk* get_chunk_vs(size_t chunk_word_size);
-
- // Expands the committed space by at least min_words words.
- bool expand_by(size_t min_words, size_t preferred_words);
-
- // In preparation for deleting this node, remove all the chunks
- // in the node from any freelist.
- void purge(ChunkManager* chunk_manager);
-
- // If an allocation doesn't fit in the current node a new node is created.
- // Allocate chunks out of the remaining committed space in this node
- // to avoid wasting that memory.
- // This always adds up because all the chunk sizes are multiples of
- // the smallest chunk size.
- void retire(ChunkManager* chunk_manager);
+ ~VirtualSpaceNode();
- void print_on(outputStream* st) const { print_on(st, K); }
- void print_on(outputStream* st, size_t scale) const;
- void print_map(outputStream* st, bool is_class) const;
+ //// Chunk allocation, splitting, merging /////
- // Debug support
- DEBUG_ONLY(void mangle();)
- // Verify counters and basic structure. Slow mode: verify all chunks in depth and occupancy map.
- DEBUG_ONLY(void verify(bool slow);)
- // Verify that all free chunks in this node are ideally merged
- // (there should not be multiple small chunks where a large chunk could exist.)
- DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
+ // Allocate a root chunk from this node. Will fail and return NULL
+ // if the node is full.
+ // Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
+ // Hence, before using this chunk, it must be committed.
+ // Also, no limits are checked, since no committing takes place.
+ Metachunk* allocate_root_chunk();
+
+ // Given a chunk c, split it recursively until you get a chunk of the given target_level.
+ //
+ // The original chunk must not be part of a freelist.
+ //
+ // Returns pointer to the result chunk; returns split off chunks in splinters array.
+ //
+ // Returns NULL if chunk cannot be split at least once.
+ Metachunk* split(chklvl_t target_level, Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]);
+
+ // Given a chunk, attempt to merge it recursively with its neighboring chunks.
+ //
+ // If successful (merged at least once), returns address of
+ // the merged chunk; NULL otherwise.
+ //
+ // The merged chunks are removed from their freelist; the number of merged chunks is
+ // returned, split by level, in num_merged array. Note that these numbers do not
+ // include the original chunk.
+ //
+ // !!!
Please note that if this method returns a non-NULL value, the + // original chunk will be invalid and should not be accessed anymore! !!! + Metachunk* merge(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]); + + + /// misc ///// + + // Returns size, in words, of the used space in this node alone. + // (Notes: + // - This is the space handed out to the ChunkManager, so it is "used" from the viewpoint of this node, + // but not necessarily used for Metadata. + // - This may or may not be committed memory. + size_t used_words() const { return _used_words; } + + // Returns size, in words, of how much space is left in this node alone. + size_t free_words() const { return _word_size - _used_words; } + + // Returns size, in words, of committed space in this node alone. + size_t committed_words() const; + + //// Committing/uncommitting memory ///// + + // Given an address range, ensure it is committed. + // + // The range does not have to be aligned to granule size. However, the function will always commit + // whole granules. + // + // Function will: + // - check how many granules in that region are uncommitted; If all are committed, it + // returns true immediately. + // - check if committing those uncommitted granules would bring us over the commit limit + // (GC threshold, MaxMetaspaceSize). If true, it returns false. + // - commit the memory. + // - mark the range as committed in the commit mask + // + // Returns true if success, false if it did hit a commit limit. + bool ensure_range_is_committed(MetaWord* p, size_t word_size); + + // Given an address range (which has to be aligned to commit granule size): + // - uncommit it + // - mark it as uncommitted in the commit mask + bool uncommit_range(MetaWord* p, size_t word_size); + + //// List stuff //// + VirtualSpaceNode* next() const { return _next; } + void set_next(VirtualSpaceNode* vsn) { _next = vsn; } + + /// Debug stuff //// + + // Verify counters and basic structure. 
Slow mode: verify all chunks in depth + bool contains(const MetaWord* p) const { + return p >= _base && p < _base + _used_words; + } + +#ifdef ASSERT + void check_pointer(const MetaWord* p) const { + assert(contains(p), "invalid pointer"); + } + void verify(bool slow) const; +#endif }; + } // namespace metaspace #endif // SHARE_MEMORY_METASPACE_VIRTUALSPACENODE_HPP --- old/src/hotspot/share/memory/metaspaceChunkFreeListSummary.hpp 2019-07-22 11:08:09.769737024 +0200 +++ new/src/hotspot/share/memory/metaspaceChunkFreeListSummary.hpp 2019-07-22 11:08:09.557734755 +0200 @@ -27,76 +27,25 @@ class MetaspaceChunkFreeListSummary { - size_t _num_specialized_chunks; - size_t _num_small_chunks; - size_t _num_medium_chunks; - size_t _num_humongous_chunks; - - size_t _specialized_chunks_size_in_bytes; - size_t _small_chunks_size_in_bytes; - size_t _medium_chunks_size_in_bytes; - size_t _humongous_chunks_size_in_bytes; + size_t _num_chunks; + size_t _size_chunks; public: MetaspaceChunkFreeListSummary() : - _num_specialized_chunks(0), - _num_small_chunks(0), - _num_medium_chunks(0), - _num_humongous_chunks(0), - _specialized_chunks_size_in_bytes(0), - _small_chunks_size_in_bytes(0), - _medium_chunks_size_in_bytes(0), - _humongous_chunks_size_in_bytes(0) + _num_chunks(0), + _size_chunks(0) {} - MetaspaceChunkFreeListSummary(size_t num_specialized_chunks, - size_t num_small_chunks, - size_t num_medium_chunks, - size_t num_humongous_chunks, - size_t specialized_chunks_size_in_bytes, - size_t small_chunks_size_in_bytes, - size_t medium_chunks_size_in_bytes, - size_t humongous_chunks_size_in_bytes) : - _num_specialized_chunks(num_specialized_chunks), - _num_small_chunks(num_small_chunks), - _num_medium_chunks(num_medium_chunks), - _num_humongous_chunks(num_humongous_chunks), - _specialized_chunks_size_in_bytes(specialized_chunks_size_in_bytes), - _small_chunks_size_in_bytes(small_chunks_size_in_bytes), - _medium_chunks_size_in_bytes(medium_chunks_size_in_bytes), - _humongous_chunks_size_in_bytes(humongous_chunks_size_in_bytes) + MetaspaceChunkFreeListSummary(size_t num_chunks, size_t size_chunks) : + _num_chunks(num_chunks), _size_chunks(size_chunks) {} - size_t num_specialized_chunks() const { - return _num_specialized_chunks; + size_t num_chunks() const { + return _num_chunks; } - size_t num_small_chunks() const { - return _num_small_chunks; - } - - size_t num_medium_chunks() const { - return _num_medium_chunks; - } - - size_t num_humongous_chunks() const { - return _num_humongous_chunks; - } - - size_t specialized_chunks_size_in_bytes() const { - return _specialized_chunks_size_in_bytes; - } - - size_t small_chunks_size_in_bytes() const { - return _small_chunks_size_in_bytes; - } - - size_t medium_chunks_size_in_bytes() const { - return _medium_chunks_size_in_bytes; - } - - size_t humongous_chunks_size_in_bytes() const { - return _humongous_chunks_size_in_bytes; + size_t size_chunks() const { + return _size_chunks; } }; --- old/src/hotspot/share/utilities/bitMap.cpp 2019-07-22 11:08:10.261742288 +0200 +++ new/src/hotspot/share/utilities/bitMap.cpp 2019-07-22 11:08:10.049740020 +0200 @@ -609,7 +609,7 @@ // then modifications in and to the left of the _bit_ being // currently sampled will not be seen. Note also that the // interval [leftOffset, rightOffset) is right open. 
-bool BitMap::iterate(BitMapClosure* blk, idx_t leftOffset, idx_t rightOffset) { +bool BitMap::iterate(BitMapClosure* blk, idx_t leftOffset, idx_t rightOffset) const { verify_range(leftOffset, rightOffset); idx_t startIndex = word_index(leftOffset); --- old/src/hotspot/share/utilities/bitMap.hpp 2019-07-22 11:08:10.757747595 +0200 +++ new/src/hotspot/share/utilities/bitMap.hpp 2019-07-22 11:08:10.541745284 +0200 @@ -195,6 +195,7 @@ return calc_size_in_words(size_in_bits) * BytesPerWord; } + // Size, in number of bits, of this map. idx_t size() const { return _size; } idx_t size_in_words() const { return calc_size_in_words(size()); } idx_t size_in_bytes() const { return calc_size_in_bytes(size()); } @@ -253,11 +254,11 @@ void clear_large(); inline void clear(); - // Iteration support. Returns "true" if the iteration completed, false + // Iteration support [leftIndex, rightIndex). Returns "true" if the iteration completed, false // if the iteration terminated early (because the closure "blk" returned // false). - bool iterate(BitMapClosure* blk, idx_t leftIndex, idx_t rightIndex); - bool iterate(BitMapClosure* blk) { + bool iterate(BitMapClosure* blk, idx_t leftIndex, idx_t rightIndex) const; + bool iterate(BitMapClosure* blk) const { // call the version that takes an interval return iterate(blk, 0, size()); } @@ -279,6 +280,9 @@ // aligned to bitsizeof(bm_word_t). idx_t get_next_one_offset_aligned_right(idx_t l_index, idx_t r_index) const; + // Returns the number of bits set between [l_index, r_index) in the bitmap. + idx_t count_one_bits(idx_t l_index, idx_t r_index) const; + // Returns the number of bits set in the bitmap. idx_t count_one_bits() const; --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/abstractPool.hpp 2019-07-22 11:08:11.037750591 +0200 @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_MEMORY_METASPACE_ABSTRACTPOOL_HPP +#define SHARE_MEMORY_METASPACE_ABSTRACTPOOL_HPP + +#include "memory/allocation.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + +namespace metaspace { + +// A simple helper class; +// +// holds a linear array of elements of type E; +// +// array lives in C heap and expands automatically; +// +// free elements can be returned and are kept in a freelist; +// +// elements can be retrieved by their index (index type I). +// +// E must be of at least sizeof(I), and I must be an integral size. 
+//
+template <class E, typename I, I initial_size, I size_increase, I max_size>
+class AbstractPool {
+public:
+
+  typedef I size_type_t;
+
+  static const I invalid_idx = (I)0;
+
+private:
+
+  E* _arr;
+  size_type_t _capacity;
+  size_type_t _used;
+  I _freelist;
+
+  const char* const _name;
+
+  // Number of elements in the freelist
+  DEBUG_ONLY(size_type_t _num_in_freelist;)
+
+  // Enlarge internal array if needed.
+  void enlarge_to(size_type_t new_len) {
+    assert(new_len > _capacity && new_len <= max_size, "Sanity");
+    const size_t new_size_bytes = new_len * sizeof(E);
+    E* p = (E*) os::realloc(_arr, new_size_bytes, mtInternal);
+    if (p == NULL) {
+      vm_exit_out_of_memory(new_size_bytes, OOM_MALLOC_ERROR, "Pool %s: not enough space", _name);
+    }
+    _arr = p; _capacity = new_len;
+  }
+
+  //// freelist handling ////
+
+  struct free_elem_t {
+    I next_idx;
+  };
+
+  STATIC_ASSERT(sizeof(E) >= sizeof(free_elem_t));
+  STATIC_ASSERT(initial_size > 0);
+  STATIC_ASSERT(size_increase > 0);
+  STATIC_ASSERT(max_size >= size_increase + initial_size);
+
+  E* take_from_freelist() {
+    I idx = _freelist;
+    if (invalid_idx == idx) {
+      return NULL;
+    }
+    E* p = elem_at_index(idx);
+    _freelist = ((free_elem_t*)p)->next_idx;
+    assert(_num_in_freelist > 0, "counter underflow");
+    DEBUG_ONLY(_num_in_freelist --;)
+    return p;
+  }
+
+  void add_to_freelist(E* p) {
+    ((free_elem_t*)p)->next_idx = _freelist;
+    _freelist = index_for_elem(p);
+    DEBUG_ONLY(_num_in_freelist ++;)
+  }
+
+
+#ifdef ASSERT
+  void check_elem(const E* p) const { assert(is_valid_elem(p), "invalid pointer"); }
+  void check_idx(I i) const { assert(i < _used, "invalid index"); }
+#endif
+
+public:
+
+  AbstractPool(const char* name)
+    : _arr(NULL), _capacity(0), _used(0), _freelist(invalid_idx), _name(name)
+  {
+    DEBUG_ONLY(_num_in_freelist = 0;)
+  }
+  ~AbstractPool() { os::free(_arr); }
+
+  I index_for_elem(const E* p) const {
+    DEBUG_ONLY(check_elem(p));
+    return p - _arr;
+  }
+
+  E* elem_at_index(I i) const {
+    DEBUG_ONLY(check_idx(i));
+    return _arr + i;
+  }
+
+  // Allocate a new element. Enlarge internal array if needed.
+  // Will return NULL if max_size is reached.
+  E* allocate_element() {
+
+    E* p = take_from_freelist();
+    if (p != NULL) {
+      return p;
+    }
+
+    if (_capacity == _used) {
+      size_type_t new_len = _capacity == 0 ? initial_size : _capacity + size_increase;
+      if (new_len > max_size) {
+        new_len = max_size;
+        if (new_len <= _capacity) {
+          return NULL;
+        }
+      }
+      enlarge_to(new_len);
+    }
+
+    // Avoid handing out anything at index 0.
+    // This allows callers to use "I index == 0" as "invalid ref".
+    if (_used == 0) {
+      _used ++;
+    }
+
+    p = _arr + _used;
+    _used ++;
+    return p;
+
+  }
+
+  void return_element(E* p) {
+    DEBUG_ONLY(check_elem(p));
+    add_to_freelist(p);
+  }
+
+  // Returns true if p was allocated from this pool.
+  bool is_valid_elem(const E* p) const { return p >= _arr && p < _arr + _used; }
+
+  bool is_valid_index(I idx) const { return idx < _used; }
+
+  // Returns number of allocated elements (including those returned to free list)
+  size_type_t used() const { return _used; }
+
+  // Returns number of elements in free list
+  size_type_t free() const { return _num_in_freelist; }
+
+  // Returns size of memory used.
+  size_t memory_footprint() const { return _capacity * sizeof(E); }
+
+};
+
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_ABSTRACTPOOL_HPP
--- /dev/null	2019-07-22 08:07:50.621255384 +0200
+++ new/src/hotspot/share/memory/metaspace/chunkAllocSequence.cpp	2019-07-22 11:08:11.545756026 +0200
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "memory/metaspace/chunkAllocSequence.hpp"
+#include "memory/metaspace/chunkLevel.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+namespace metaspace {
+
+// A chunk allocation sequence which can be encoded with a simple const array.
+class ConstantChunkAllocSequence : public ChunkAllocSequence {
+
+  // integer array specifying chunk level allocation progression.
+  // Last chunk is to be an endlessly repeated allocation.
+  const chklvl_t* const _entries;
+  const int _num_entries;
+
+public:
+
+  ConstantChunkAllocSequence(const chklvl_t* array, int num_entries)
+    : _entries(array)
+    , _num_entries(num_entries)
+  {
+    assert(_num_entries > 0, "must not be empty.");
+  }
+
+  chklvl_t get_next_chunk_level(int num_allocated) const {
+    if (num_allocated >= _num_entries) {
+      // Caller shall repeat last allocation
+      return _entries[_num_entries - 1];
+    }
+    return _entries[num_allocated];
+  }
+
+};
+
+// hard-coded chunk allocation sequences for various space types
+
+///////////////////////////
+// chunk allocation sequences for normal loaders:
+static const chklvl_t g_sequ_standard_nonclass[] = {
+    chklvl::CHUNK_LEVEL_4K,
+    chklvl::CHUNK_LEVEL_4K,
+    chklvl::CHUNK_LEVEL_4K,
+    chklvl::CHUNK_LEVEL_4K,
+    chklvl::CHUNK_LEVEL_64K,
+    -1 // .. repeat last
+};
+
+static const chklvl_t g_sequ_standard_class[] = {
+    chklvl::CHUNK_LEVEL_4K,
+    chklvl::CHUNK_LEVEL_4K,
+    chklvl::CHUNK_LEVEL_4K,
+    chklvl::CHUNK_LEVEL_4K,
+    chklvl::CHUNK_LEVEL_32K,
+    -1 // .. repeat last
+};
+
+///////////////////////////
+// chunk allocation sequences for reflection/anonymous loaders:
+// We allocate four smallish chunks before progressing to bigger chunks.
+static const chklvl_t g_sequ_anon_nonclass[] = {
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_4K,
+    -1 // .. repeat last
+};
+
+static const chklvl_t g_sequ_anon_class[] = {
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_1K,
+    chklvl::CHUNK_LEVEL_4K,
+    -1 // .. repeat last
+};
+
+#define DEFINE_CLASS_FOR_ARRAY(what) \
+  static ConstantChunkAllocSequence g_chunk_alloc_sequence_##what (g_sequ_##what, sizeof(g_sequ_##what)/sizeof(chklvl_t));
+
+DEFINE_CLASS_FOR_ARRAY(standard_nonclass)
+DEFINE_CLASS_FOR_ARRAY(standard_class)
+DEFINE_CLASS_FOR_ARRAY(anon_nonclass)
+DEFINE_CLASS_FOR_ARRAY(anon_class)
+
+
+class BootLoaderChunkAllocSequence : public ChunkAllocSequence {
+
+  // For now, this mirrors what the old code did
+  // (see SpaceManager::get_initial_chunk_size() and SpaceManager::calc_chunk_size).
+
+  // Not sure how much sense this still makes, especially with CDS - by default we
+  // now load JDK classes from CDS and therefore most of the boot loader
+  // chunks remain unoccupied.
+
+  // Also, InitialBootClassLoaderMetaspaceSize was/is confusing since it only applies
+  // to the non-class chunk.
+
+  const bool _is_class;
+
+  static chklvl_t calc_initial_chunk_level(bool is_class) {
+
+    size_t word_size = 0;
+    if (is_class) {
+      // In the old version, the first class space chunk for the boot loader was always medium class chunk size * 6.
+      word_size = 32 * K * 6;
+
+    } else {
+      assert(InitialBootClassLoaderMetaspaceSize < chklvl::MAX_CHUNK_BYTE_SIZE,
+             "InitialBootClassLoaderMetaspaceSize too large");
+      word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
+    }
+    return chklvl::level_fitting_word_size(word_size);
+  }
+
+public:
+
+  BootLoaderChunkAllocSequence(bool is_class)
+    : _is_class(is_class)
+  {}
+
+  chklvl_t get_next_chunk_level(int num_allocated) const {
+    if (num_allocated == 0) {
+      return calc_initial_chunk_level(_is_class);
+    }
+    // A bit arbitrary, but this is what the old code did. Can tweak later if needed.
+    return chklvl::CHUNK_LEVEL_64K;
+  }
+
+};
+
+static BootLoaderChunkAllocSequence g_chunk_alloc_sequence_boot_non_class(false);
+static BootLoaderChunkAllocSequence g_chunk_alloc_sequence_boot_class(true);
+
+
+const ChunkAllocSequence* ChunkAllocSequence::alloc_sequence_by_space_type(Metaspace::MetaspaceType space_type, bool is_class) {
+
+  if (is_class) {
+    switch (space_type) {
+    case Metaspace::StandardMetaspaceType:          return &g_chunk_alloc_sequence_standard_class;
+    case Metaspace::ReflectionMetaspaceType:
+    case Metaspace::UnsafeAnonymousMetaspaceType:   return &g_chunk_alloc_sequence_anon_class;
+    case Metaspace::BootMetaspaceType:              return &g_chunk_alloc_sequence_boot_class;
+    default: ShouldNotReachHere();
+    }
+  } else {
+    switch (space_type) {
+    case Metaspace::StandardMetaspaceType:          return &g_chunk_alloc_sequence_standard_nonclass;
+    case Metaspace::ReflectionMetaspaceType:
+    case Metaspace::UnsafeAnonymousMetaspaceType:   return &g_chunk_alloc_sequence_anon_nonclass;
+    case Metaspace::BootMetaspaceType:              return &g_chunk_alloc_sequence_boot_non_class;
+    default: ShouldNotReachHere();
+    }
+  }
+
+  return NULL;
+
+}
+
+} // namespace metaspace
+
--- /dev/null	2019-07-22 08:07:50.621255384 +0200
+++ new/src/hotspot/share/memory/metaspace/chunkAllocSequence.hpp	2019-07-22 11:08:12.053761461 +0200
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_CHUNKALLOCSEQUENCE_HPP
+#define SHARE_MEMORY_METASPACE_CHUNKALLOCSEQUENCE_HPP
+
+#include "memory/metaspace.hpp"             // For Metaspace::MetaspaceType
+#include "memory/metaspace/chunkLevel.hpp"
+
+namespace metaspace {
+
+class ChunkAllocSequence {
+public:
+
+  virtual chklvl_t get_next_chunk_level(int num_allocated) const = 0;
+
+  // Given a space type, return the correct allocation sequence to use.
+  // The returned object is static and read only.
+  static const ChunkAllocSequence* alloc_sequence_by_space_type(Metaspace::MetaspaceType space_type, bool is_class);
+
+};
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_CHUNKALLOCSEQUENCE_HPP
--- /dev/null	2019-07-22 08:07:50.621255384 +0200
+++ new/src/hotspot/share/memory/metaspace/chunkLevel.cpp	2019-07-22 11:08:12.561766895 +0200
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2019, SAP and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "memory/metaspace/chunkLevel.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+namespace metaspace {
+
+chklvl_t chklvl::level_fitting_word_size(size_t word_size) {
+
+  // This can probably be done better.
+  // Start at the smallest chunk size and move toward larger chunks (lower levels)
+  // until the chunk is large enough to hold word_size words.
+  chklvl_t l = chklvl::HIGHEST_CHUNK_LEVEL;
+  while (l > chklvl::LOWEST_CHUNK_LEVEL && word_size > chklvl::word_size_for_level(l)) {
+    l --;
+  }
+  assert(word_size <= chklvl::word_size_for_level(l), "Word size too large for a root chunk.");
+  return l;
+
+}
+
+} // namespace metaspace
+
--- /dev/null	2019-07-22 08:07:50.621255384 +0200
+++ new/src/hotspot/share/memory/metaspace/chunkLevel.hpp	2019-07-22 11:08:13.069772330 +0200
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_CHUNKLEVEL_HPP
+#define SHARE_MEMORY_METASPACE_CHUNKLEVEL_HPP
+
+#include "utilities/globalDefinitions.hpp"
+
+// Constants for the chunk levels and some utility functions.
+
+namespace metaspace {
+
+// Metachunk level (must be signed)
+typedef signed int chklvl_t;
+
+// Chunks are managed by a binary buddy allocator.
+
+// Chunk sizes range from 1K to 4MB (64bit).
+//
+// Reasoning: .... TODO explain
+
+// Each chunk has a level; the level corresponds to its position in the tree
+// and describes its size.
+//
+// The largest chunks are called root chunks, of 4MB in size, and have level 0.
+// From there on it goes:
+//
+//  size    level
+//  4MB       0
+//  2MB       1
+//  1MB       2
+//  512K      3
+//  256K      4
+//  128K      5
+//  64K       6
+//  32K       7
+//  16K       8
+//  8K        9
+//  4K       10
+//  2K       11
+//  1K       12
+
+namespace chklvl {
+
+static const size_t   MAX_CHUNK_BYTE_SIZE    = 4 * M;
+static const int      NUM_CHUNK_LEVELS       = 13;
+// The smallest chunk (level 12) is 1K: the root chunk size halved once per level.
+static const size_t   MIN_CHUNK_BYTE_SIZE    = (MAX_CHUNK_BYTE_SIZE >> ((size_t)NUM_CHUNK_LEVELS - 1));
+
+static const size_t   MIN_CHUNK_WORD_SIZE    = MIN_CHUNK_BYTE_SIZE / sizeof(MetaWord);
+static const size_t   MAX_CHUNK_WORD_SIZE    = MAX_CHUNK_BYTE_SIZE / sizeof(MetaWord);
+
+static const chklvl_t ROOT_CHUNK_LEVEL       = 0;
+
+static const chklvl_t HIGHEST_CHUNK_LEVEL    = NUM_CHUNK_LEVELS - 1;
+static const chklvl_t LOWEST_CHUNK_LEVEL     = 0;
+
+inline bool is_valid_level(chklvl_t level) {
+  return level >= LOWEST_CHUNK_LEVEL && level <= HIGHEST_CHUNK_LEVEL;
+}
+
+inline void check_valid_level(chklvl_t lvl) {
+  assert(is_valid_level(lvl), "invalid level (%d)", (int)lvl);
+}
+
+// Given a level, return the chunk size, in words.
+inline size_t word_size_for_level(chklvl_t level) {
+  assert(is_valid_level(level), "invalid chunk level (%d)", level);
+  return MAX_CHUNK_WORD_SIZE >> level;
+}
+
+// Given an arbitrary word size smaller than the largest chunk size,
+// return the highest chunk level able to hold this size.
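+// For example, on 64-bit (8-byte MetaWords) a request for 300 words (2400 bytes)
+// does not fit into a 2K chunk (256 words) but fits into a 4K chunk (512 words),
+// so the function returns CHUNK_LEVEL_4K (level 10).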
+chklvl_t level_fitting_word_size(size_t word_size); + +// Shorthands to refer to exact sizes +static const chklvl_t CHUNK_LEVEL_4M = ROOT_CHUNK_LEVEL; +static const chklvl_t CHUNK_LEVEL_2M = (ROOT_CHUNK_LEVEL + 1); +static const chklvl_t CHUNK_LEVEL_1M = (ROOT_CHUNK_LEVEL + 2); +static const chklvl_t CHUNK_LEVEL_512K = (ROOT_CHUNK_LEVEL + 3); +static const chklvl_t CHUNK_LEVEL_256K = (ROOT_CHUNK_LEVEL + 4); +static const chklvl_t CHUNK_LEVEL_128K = (ROOT_CHUNK_LEVEL + 5); +static const chklvl_t CHUNK_LEVEL_64K = (ROOT_CHUNK_LEVEL + 6); +static const chklvl_t CHUNK_LEVEL_32K = (ROOT_CHUNK_LEVEL + 7); +static const chklvl_t CHUNK_LEVEL_16K = (ROOT_CHUNK_LEVEL + 8); +static const chklvl_t CHUNK_LEVEL_8K = (ROOT_CHUNK_LEVEL + 9); +static const chklvl_t CHUNK_LEVEL_4K = (ROOT_CHUNK_LEVEL + 10); +static const chklvl_t CHUNK_LEVEL_2K = (ROOT_CHUNK_LEVEL + 11); +static const chklvl_t CHUNK_LEVEL_1K = (ROOT_CHUNK_LEVEL + 12); + +STATIC_ASSERT(CHUNK_LEVEL_1K == HIGHEST_CHUNK_LEVEL); + +} // namespace chklvl + +} // namespace metaspace + +#endif // SHARE_MEMORY_METASPACE_BLOCKFREELIST_HPP --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/chunkTree.cpp 2019-07-22 11:08:13.581777806 +0200 @@ -0,0 +1,477 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" + +//#include "logging/log.hpp" +#include "memory/metaspace/chunkTree.hpp" +#include "memory/metaspace/metachunk.hpp" +//#include "utilities/ostream.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + +namespace metaspace { + + +#ifdef ASSERT + +void ChunkTree::check_is_valid_ref(ref_t ref) { + const u2 raw_idx = get_raw_index_from_reference(ref); + const u2 info = get_info_from_reference(ref); + switch (info) { + case node_marker: + assert(_nodePool.is_valid_index(raw_idx), "Invalid node ref: %u", (unsigned)ref); + break; + case free_chunk_marker: + case used_chunk_marker: + assert(_chunkPool.is_valid_index(raw_idx), "Invalid chunk ref: %u", (unsigned)ref); + break; + default: + ShouldNotReachHere(); + } +} + +void ChunkTree::verify_node(const btnode_t* n) { + + const ref_t node_ref = encode_reference_for_node(n); + + // Check parent. 
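+  // A parent reference of 0 is only valid for the tree root; for any other node,
+  // one of the parent's two child slots must point back at this node.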
+ const ref_t parent_ref = n->parent; + + if (parent_ref == 0) { + assert(node_ref == _root, "This should be the root node"); + } else { + btnode_t* parent_node = resolve_reference_to_node(parent_ref); + assert(parent_node->child[0] == node_ref || + parent_node->child[1] == node_ref, "Incorrect parent backlink."); + } + + // Check children. + for (int i = 0; i < 2; i ++) { + const ref_t child_ref = n->child[i]; + assert(child_ref != 0, "child ref null"); + + // May be either a chunk or another node. + if (reference_is_chunk(child_ref)) { + Metachunk* c = resolve_reference_to_chunk(child_ref); + assert(c->tree_node_ref() == node_ref, "Incorrect parent backlink."); + if (reference_is_free_chunk(child_ref)) { + assert(c->is_free(), "free info mismatch"); + } else { + assert(c->is_in_use(), "free info mismatch"); + } + } else { + // This chunk is another node + btnode_t* n = resolve_reference_to_node(child_ref); + assert(n->parent == node_ref, "Incorrect parent backlink."); + } + } + +} +#endif + + +// Initialize: allocate a root node and a root chunk header; return the +// root chunk header. It will be partly initialized. +// Note: this just allocates a memory-less header; memory itself is allocated inside VirtualSpaceNode. +Metachunk* ChunkTree::alloc_root_chunk_header() { + + assert(_root == 0, "already have a root"); + + Metachunk* c = _chunkPool.allocate_element(); + c->wipe(); + + c->set_level(chklvl::ROOT_CHUNK_LEVEL); + + const ref_t root_node_ref = encode_reference_for_chunk(c, true); + + _root = root_node_ref; + + return c; + +} + +// Given a chunk c, split it once. +// +// The original chunk must not be part of a freelist. +// +// Returns pointer to the result chunk; updates the splinters array to return the splintered off chunk. +// +// Returns NULL if chunk cannot be split any further. +Metachunk* ChunkTree::split_once(Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]) { + + assert(c->is_free(), "Can only split free chunks."); + + if (c->level() == chklvl::HIGHEST_CHUNK_LEVEL) { + return NULL; + } + + const ref_t orig_chunk_ref = encode_reference_for_chunk(c, true); + + // Split chunk into two chunks. + Metachunk* leader = c; + Metachunk* follower = allocate_new_chunk(); + follower->wipe(); + + const chklvl_t new_level = c->level() + 1; + const size_t new_word_size = chklvl::word_size_for_level(new_level); + const size_t old_committed_words = c->committed_words(); + + assert(new_word_size == c->word_size() / 2, "Sanity"); + assert(old_committed_words <= c->word_size(), "Sanity"); + + leader->set_level(new_level); + follower->set_level(new_level); + + follower->set_base(c->base() + new_word_size); + + // Carry over commit boundary to new chunk; this is an optimization + // to avoid too frequent commit requests. + if (old_committed_words >= new_word_size) { + leader->set_committed_words(new_word_size); + follower->set_committed_words(old_committed_words - new_word_size); + } else { + leader->set_committed_words(old_committed_words); + follower->set_committed_words(0); + } + + // Create a new parent node for the two sibling chunks. + btnode_t* const new_parent = allocate_new_node(); + const ref_t new_parent_ref = encode_reference_for_node(new_parent); + + // Replace the reference to the original chunk in the parent node with + // the reference to the parent node. 
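+  // (If the original chunk was the root of the tree, the new parent node simply
+  //  becomes the new root.)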
+ if (_root == orig_chunk_ref) { + assert(new_level == chklvl::ROOT_CHUNK_LEVEL + 1, "Original chunk should have been root chunk."); + assert(c->tree_node_ref() == 0, "Original chunk should not have a parent."); + _root = new_parent_ref; + } else { + const ref_t old_parent_ref = c->tree_node_ref(); + btnode_t* const old_parent = resolve_reference_to_node(old_parent_ref); + assert(old_parent != NULL, "Original chunk should have a parent."); + if (old_parent->child[0] == orig_chunk_ref) { + old_parent->child[0] = new_parent_ref; + } else if (old_parent->child[1] == orig_chunk_ref) { + old_parent->child[1] = new_parent_ref; + } else { + ShouldNotReachHere(); + } + } + + // Tell the two new chunks who their parent is. + c->set_tree_node_ref(new_parent_ref); + follower->set_tree_node_ref(new_parent_ref); + + // Remember the splintered off chunk in the splinters array + assert(splinters[new_level] == NULL, "Sanity"); + splinters[new_level] = follower; + + return c; + +} + +// Given a chunk, attempt to merge it with its sibling if it is free. +// Returns pointer to the result chunk if successful, NULL otherwise. +// +// Returns number of merged chunks, by chunk level, in num_merged array. These numbers +// includes the original chunk. +// +// !!! Please note that if this method returns a non-NULL value, the +// original chunk will be invalid and should not be accessed anymore! !!! +Metachunk* ChunkTree::merge_once(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]) { + + assert(c->is_free(), "Only call for free chunks."); + + const chklvl_t orig_level = c->level(); + + // If chunk is already a root chunk, we cannot merge any further. + if (orig_level == chklvl::ROOT_CHUNK_LEVEL) { + return NULL; + } + + const ref_t orig_chunk_ref = encode_reference_for_chunk(c, true); + + // Get parent node and parent ref. + const ref_t parent_ref = c->tree_node_ref(); + btnode_t* const parent = resolve_reference_to_node(parent_ref); + assert(parent != NULL, "Chunk should have a parent."); // Since we left for root chunks already + + // Check if sibling are free and not splintered. + if (reference_is_free_chunk(parent->child[0]) == false || + reference_is_free_chunk(parent->child[1]) == false) { + return NULL; + } + + // Merge chunks. + Metachunk* c1 = resolve_reference_to_chunk(parent->child[0]); + Metachunk* c2 = resolve_reference_to_chunk(parent->child[1]); + + // At this point, both metachunks should be free and not splintered. + assert(c1->is_free() && c2->is_free() && c1->level() == c2->level(), "Sanity"); + + // Find out who is leader. Let the leader live on. + Metachunk* leader = c1; + Metachunk* follower = c2; + + if (c1->base() > c2->base()) { + leader = c2; follower = c1; + } + + // Chunk memory should be adjacent to each other. + assert(leader->base() + leader->word_size() == follower->base(), "Wrong geometry"); + + leader->set_level(orig_level - 1); + + // Carry over committed words. + size_t new_committed_words = leader->committed_words(); + if (leader->is_fully_committed()) { + new_committed_words += follower->committed_words(); + } + assert(new_committed_words <= leader->word_size(), "Sanity"); + leader->set_committed_words(new_committed_words); + + const ref_t leader_ref = encode_reference_for_chunk(leader, true); + + // Re-hang new merged chunk in tree one level up. + const ref_t grand_parent_ref = parent->parent; + if (grand_parent_ref == 0) { + // Seems old parent node was root. That means we should have a root chunk now. 
+ assert(leader->level() == chklvl::ROOT_CHUNK_LEVEL, "Sanity"); + // Which we just hang under _root... + _root = leader_ref; + } else { + btnode_t* grand_parent = resolve_reference_to_node(grand_parent_ref); + if (grand_parent->child[0] == parent_ref) { + grand_parent->child[0] = leader_ref; + } else if (grand_parent->child[1] == parent_ref) { + grand_parent->child[1] = leader_ref; + } else { + ShouldNotReachHere(); + } + } + + // Remove the follower from its free list + follower->remove_from_list(); + + // Adjust stats + num_merged[orig_level] ++; + + // Release the superfluous chunk header, and the old parent node. + release_chunk(follower); + release_node(parent); + + return leader; + +} + +// Given a chunk c, split it recursively until you get a chunk of the given target_level. +// +// The original chunk must not be part of a freelist. +// +// Returns pointer to the result chunk; returns split off chunks in splinters array. +// +// Returns NULL if chunk cannot be split at least once. +Metachunk* ChunkTree::split(chklvl_t target_level, Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]) { + + DEBUG_ONLY(chklvl::check_valid_level(target_level)); + assert(target_level > c->level(), "Wrong target level"); + + Metachunk* c_last = split_once(c, splinters); + while (c_last != NULL && c_last->level() < target_level) { + c_last = split_once(c, splinters); + } + + assert(c != NULL, "Sanity"); + + return c; + +} + +// Given a chunk, attempt to merge it recursively with its neighboring chunks. +// +// If successful (merged at least once), returns address of +// the merged chunk; NULL otherwise. +// +// The merged chunks are removed from their freelist; the number of merged chunks is +// returned, split by level, in num_merged array. Note that these numbers does not +// include the original chunk. +// +// !!! Please note that if this method returns a non-NULL value, the +// original chunk will be invalid and should not be accessed anymore! !!! +Metachunk* ChunkTree::merge(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]) { + + DEBUG_ONLY(c->verify(false);) + assert(c->is_free(), "Only merge free chunks."); + + assert(c->level() > chklvl::ROOT_CHUNK_LEVEL, "Do not merge root chunks"); + + // Original chunk should be outside the chunk manager freelist before attempting to merge. + assert(c->prev() == NULL && c->next() == NULL, "Remove chunk from freelist before merging."); + + Metachunk* c_last = merge_once(c, num_merged); + while (c_last != NULL && c_last->level() > chklvl::ROOT_CHUNK_LEVEL) { + c_last = merge_once(c, num_merged); + } + + return c_last; +} + + +//// tree traversal //// + +bool ChunkTree::iterate_chunks_helper(ref_t ref, ChunkClosure* cc) const { + if (reference_is_chunk(ref)) { + Metachunk* const c = resolve_reference_to_chunk(ref); + return cc->do_chunk(c); + } else { + btnode_t* const n = resolve_reference_to_node(ref); + assert(n->child[0] != 0 && n->child[1] != 0, "nodes have always children"); + return iterate_chunks_helper(n->child[0], cc) && + iterate_chunks_helper(n->child[1], cc); + } + + return true; +} + +// Iterate over all nodes in this tree. Returns true for complete traversal, +// false if traversal was cancelled. 
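+//
+// An illustrative closure (not part of this patch) which sums up the word sizes
+// of all chunks in a tree:
+//
+//   struct SumChunksClosure : public ChunkClosure {
+//     size_t sum;
+//     SumChunksClosure() : sum(0) {}
+//     bool do_chunk(Metachunk* chunk) { sum += chunk->word_size(); return true; }
+//   };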
+bool ChunkTree::iterate_chunks(ChunkClosure* cc) const { + + if (_root == 0) { + return true; + } + + return iterate_chunks_helper(_root, cc); +} + + + +#ifdef ASSERT + +// Helper for verify() +void ChunkTree::verify_helper(bool slow, ref_t ref, const MetaWord* p, int* num_chunks, int* num_nodes) const { + + if (reference_is_node(ref)) { + + // A node + + const btnode_t* const n = resolve_reference_to_node(ref); + + verify_node(n); + + (*num_nodes) ++; + + // Verify childs recursively. + verify_helper(slow, n->child[0], p, num_chunks, num_nodes); + verify_helper(slow, n->child[1], p, num_chunks, num_nodes); + + } else { + + // A chunk. + + const Metachunk* const c = resolve_reference_to_chunk(ref); + + assert(c->is_free() == reference_is_free_chunk(ref), "free info mismatch"); + + // Chunks in the tree should be ordered by base address of the range + // they represent, and there should be no address holes. + if (p == NULL) { + p = c->base() + c->word_size(); + } else { + assert(c->base() == p, "Chunk base address mismatch."); + } + + c->verify(slow); + + (*num_chunks) ++; + + } + +} + +void ChunkTree::verify(bool slow, const MetaWord* base) const { + + int num_chunks = 0; + int num_nodes = 0; + + verify_helper(slow, _root, base, &num_chunks, &num_nodes); + + // Number of chunks in the tree should be equal to the number of chunks + // taken from the pool; same for nodes. + assert(num_chunks == _chunkPool.used() - _chunkPool.free(), "chunk count mismatch"); + assert(num_nodes == _nodePool.used() - _nodePool.free(), "node count mismatch"); + +} + +#endif // ASSERT + + + + +//// ChunkTreeArray /////////////777 + +// Create an array of ChunkTree objects, all initialized to NULL, covering +// a given memory range. Memory range must be aligned to size of root chunks. +ChunkTreeArray::ChunkTreeArray(const MetaWord* base, size_t word_size) + : _base(base), _word_size(word_size), + _arr(NULL), _num(0) +{ + assert(is_aligned(_word_size, chklvl::MAX_CHUNK_WORD_SIZE), "not aligned"); + _num = _word_size / chklvl::MAX_CHUNK_WORD_SIZE; + _arr = NEW_C_HEAP_ARRAY(ChunkTree*, _num, mtInternal); + for (int i = 0; i < _num; i ++) { + _arr[i] = NULL; + } +} + +ChunkTreeArray::~ChunkTreeArray() { + FREE_C_HEAP_ARRAY(ChunkTree*, _arr); +} + + +// Iterate over all nodes in all trees. Returns true for complete traversal, +// false if traversal was cancelled. +bool ChunkTreeArray::iterate_chunks(ChunkClosure* cc) const { + for (int i = 0; i < _num; i ++) { + if (_arr[i] != NULL) { + if (_arr[i]->iterate_chunks(cc) == false) { + return false; + } + } + } +} + +#ifdef ASSERT +void ChunkTreeArray::verify(bool slow) const { + for (int i = 0; i < _num; i ++) { + if (_arr[i] != NULL) { + _arr[i]->verify(slow, _base); + } + } +} +#endif + + +} // namespace metaspace + + --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/chunkTree.hpp 2019-07-22 11:08:14.109783452 +0200 @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_MEMORY_METASPACE_CHUNKTREE_HPP +#define SHARE_MEMORY_METASPACE_CHUNKTREE_HPP + +#include "memory/metaspace/abstractPool.hpp" +#include "memory/metaspace/chunkLevel.hpp" +#include "memory/metaspace/metachunk.hpp" + +namespace metaspace { + +// Chunks live in a binary tree. +// + +class ChunkClosure { + public: + // Return false to cancel traversal. + virtual bool do_chunk(Metachunk* chunk) = 0; +}; + + +class ChunkTree { + + typedef u2 ref_t; + + struct btnode_t { + + ref_t parent; + ref_t child[2]; + + }; + + // full expanded (max fragmentation, 4MB root chunk split into 4096 1K chunks): 4096 headers, 4095 nodes. + // typical expansion (4MB root chunk split into 64 64K chunks): 64 headers, 32 nodes under each => 1984 nodes + + typedef AbstractPool NodePoolType; + typedef AbstractPool ChunkPoolType; + NodePoolType _nodePool; + ChunkPoolType _chunkPool; + + // The upper two bits of a reference encode information about it. + // bit 0,1: 00 - reference is a btnode_t + // 10 - reference is a free chunk + // 11 - reference is a chunk in use. + // This also means a reference has to get by with 14 bits. Which covers 16K, which is enough for both + // chunk headers and nodes within one root chunk area. + static const u2 highest_possible_index = (1 << 14) - 1; + static const u2 node_marker = 0; + static const u2 free_chunk_marker = 2; + static const u2 used_chunk_marker = 3; + + static u2 get_raw_index_from_reference(ref_t ref) { return 0x3FFF & ref; } + static u2 get_info_from_reference(ref_t ref) { return 0xc000 & ref; } + + static u2 encode_reference(u2 raw_idx, u2 info) { + assert(raw_idx <= highest_possible_index, "invalid index"); + return (info << 14) | raw_idx; + } + +#ifdef ASSERT + static bool reference_is_node(ref_t ref) { return get_info_from_reference(ref) == node_marker; } + static bool reference_is_chunk(ref_t ref) { u2 i = get_info_from_reference(ref); return i == free_chunk_marker || i == used_chunk_marker; } + static bool reference_is_used_chunk(ref_t ref) { return get_info_from_reference(ref) == used_chunk_marker; } + + static void check_is_valid_node_ref(ref_t ref) { assert(resolve_reference_to_node(ref) != NULL, "invalid node ref"); } + static void check_is_valid_chunk_ref(ref_t ref) { assert(resolve_reference_to_chunk(ref) != NULL, "invalid chunk ref"); } + static void check_is_valid_ref(ref_t ref); +#endif + + static bool reference_is_free_chunk(ref_t ref) { return get_info_from_reference(ref) == free_chunk_marker; } + + // Given a reference we know to be a node, resolve it to the node pointer. + btnode_t* resolve_reference_to_node(ref_t ref) const { + assert(reference_is_node(ref), "Not a node ref"); + return _nodePool.elem_at_index(get_raw_index_from_reference(ref)); + } + + // Allocate a new node. Node is uninitialized. + // Returns pointer to node, and reference in ref. + btnode_t* allocate_new_node() { + return _nodePool.allocate_element(); + } + + // Given a node pointer, return its correctly encoded reference. 
+ ref_t encode_reference_for_node(const btnode_t* n) const { + const u2 raw_idx = _nodePool.index_for_elem(n); + return encode_reference(raw_idx, node_marker); + } + + // Release a node to the pool. + void release_node(btnode_t* n) { + _nodePool.return_element(n); + } + + // Given a reference we know to be a chunk, resolve it to the chunk pointer. + Metachunk* resolve_reference_to_chunk(ref_t ref) const { + assert(reference_is_chunk(ref), "Not a chunk ref"); + return _chunkPool.elem_at_index(get_raw_index_from_reference(ref)); + } + + // Allocate a new node. Node is uninitialized. + // Returns pointer to node, and reference in ref. + Metachunk* allocate_new_chunk() { + return _chunkPool.allocate_element(); + } + + // Given a chunk pointer, return its correctly encoded reference. + ref_t encode_reference_for_chunk(Metachunk* c, bool is_free) const { + const u2 raw_idx = _chunkPool.index_for_elem(c); + return encode_reference(raw_idx, is_free ? free_chunk_marker : used_chunk_marker); + } + + // Release a chunk to the pool. + void release_chunk(Metachunk* c) { + _chunkPool.return_element(c); + } + + //// Helpers for tree traversal //// + class ConstNodeClosure; + static bool iterate_nodes_helper(ref_t ref, ConstNodeClosure* nc) const; + + class ConstChunkClosure; + static bool iterate_chunks_helper(ref_t ref, ChunkClosure* cc) const; + + ////// + + // Root is either a direct pointer to a Metachunk* (in that case, a root chunk of max. size) + // or a pointer to a node. + ref_t _root; + + + +#ifdef ASSERT + // Verify a life node (one which lives in the tree). + static void verify_node(const btnode_t* n); + // Helper for verify() + void verify_helper(bool slow, ref_t ref, const MetaWord* p, int* num_chunks, int* num_nodes) const; +#endif + + // Given a chunk c, split it once. + // + // The original chunk must not be part of a freelist. + // + // Returns pointer to the result chunk; updates the splinters array to return the splintered off chunk. + // + // Returns NULL if chunk cannot be split any further. + Metachunk* split_once(Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]); + + // Given a chunk, attempt to merge it with its sibling if it is free. + // Returns pointer to the result chunk if successful, NULL otherwise. + // + // Returns number of merged chunks, by chunk level, in num_merged array. These numbers + // includes the original chunk. + // + // !!! Please note that if this method returns a non-NULL value, the + // original chunk will be invalid and should not be accessed anymore! !!! + Metachunk* merge_once(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]); + +public: + + ChunkTree() : _root(NULL) {} + + // Initialize: allocate a root node and a root chunk header; return the + // root chunk header. It will be partly initialized. + // Note: this just allocates a memory-less header; memory itself is allocated inside VirtualSpaceNode. + Metachunk* alloc_root_chunk_header(); + + // Given a chunk c, split it recursively until you get a chunk of the given target_level. + // + // The original chunk must not be part of a freelist. + // + // Returns pointer to the result chunk; returns split off chunks in splinters array. + // + // Returns NULL if chunk cannot be split at least once. + Metachunk* split(chklvl_t target_level, Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]); + + // Given a chunk, attempt to merge it recursively with its neighboring chunks. + // + // If successful (merged at least once), returns address of + // the merged chunk; NULL otherwise. 
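+  // (Merging proceeds bottom-up: merge_once is applied repeatedly, each successful
+  //  step doubling the chunk size - i.e. decreasing its level by one - until the
+  //  buddy is not a free chunk (it is in use or itself split) or the root level is
+  //  reached.)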
+ // + // The merged chunks are removed from their freelist; the number of merged chunks is + // returned, split by level, in num_merged array. Note that these numbers does not + // include the original chunk. + // + // !!! Please note that if this method returns a non-NULL value, the + // original chunk will be invalid and should not be accessed anymore! !!! + Metachunk* merge(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]); + + //// tree traversal //// + + // Iterate over all nodes in this tree. Returns true for complete traversal, + // false if traversal was cancelled. + bool iterate_chunks(ChunkClosure* cc) const; + + + //// Debug stuff //// + + // Verify tree. If base != NULL, it should point to the location assumed + // to be base of the first chunk. + DEBUG_ONLY(void verify(bool slow, const MetaWord* base) const;) + + +}; + + +/////////////////////// +// An C-heap allocated array of chunk trees. Used to describe fragmentation over a range of multiple root chunks. +class ChunkTreeArray { + + const MetaWord* const _base; + const size_t _word_size; + + ChunkTree** _arr; + int _num; + +#ifdef ASSERT + void check_pointer(const MetaWord* p) const { + assert(p >= _base && p < _base + _word_size, "Invalid pointer"); + } +#endif + + int index_by_address(const MetaWord* p) const { + DEBUG_ONLY(check_pointer(p);) + return (p - _base) / chklvl::MAX_CHUNK_WORD_SIZE; + } + +public: + + // Create an array of ChunkTree objects, all initialized to NULL, covering + // a given memory range. Memory range must be aligned to size of root chunks. + ChunkTreeArray(const MetaWord* base, size_t word_size); + + ~ChunkTreeArray(); + + // Given a memory address into the range the trees cover, return the corresponding + // tree. If none existed at this position, create it. + ChunkTree* get_tree_by_address(const MetaWord* p) const { + assert(p >= _base && p < _base + _word_size, "Invalid pointer"); + const int idx = index_by_address(p); + assert(idx >= 0 && idx < _num, "Invalid index"); + if (_arr[idx] == NULL) { + _arr[idx] = new ChunkTree(); + } + return _arr[idx]; + } + + // Iterate over all nodes in all trees. Returns true for complete traversal, + // false if traversal was cancelled. + bool iterate_chunks(ChunkClosure* cc) const; + + DEBUG_ONLY(void verify(bool slow) const;) + +}; + + +} // namespace metaspace + +#endif // SHARE_MEMORY_METASPACE_CHUNKTREE_HPP --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/classLoaderMetaspace.cpp 2019-07-22 11:08:14.625788970 +0200 @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" + +#include "logging/log.hpp" +#include "memory/metaspace.hpp" +#include "memory/metaspace/chunkAllocSequence.hpp" +#include "memory/metaspace/classLoaderMetaspace.hpp" +#include "memory/metaspace/spaceManager.hpp" +#include "runtime/atomic.hpp" +#include "utilities/debug.hpp" + +namespace metaspace { + +ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType space_type) + : _lock(lock) + , _space_type(space_type) + , _non_class_space_manager(NULL) + , _class_space_manager(NULL) +{ + DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births)); + + // Initialize non-class spacemanager + _non_class_space_manager = new SpaceManager( + Metaspace::chunk_manager_metadata(), + ChunkAllocSequence::alloc_sequence_by_space_type(space_type, false), + lock); + + // If needed, initialize class spacemanager + if (Metaspace::using_class_space()) { + _class_space_manager = new SpaceManager( + Metaspace::chunk_manager_class(), + ChunkAllocSequence::alloc_sequence_by_space_type(space_type, true), + lock); + } + +} + +ClassLoaderMetaspace::~ClassLoaderMetaspace() { + Metaspace::assert_not_frozen(); + DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths)); + delete _non_class_space_manager; + delete _class_space_manager; +} + +// Allocate word_size words from Metaspace. +MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, bool is_class) { + Metaspace::assert_not_frozen(); + DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs)); + if (is_class && Metaspace::using_class_space()) { + return class_space_manager()->allocate(word_size); + } else { + return non_class_space_manager()->allocate(word_size); + } +} + +// Attempt to expand the GC threshold to be good for at least another word_size words +// and allocate. Returns NULL if failure. Used during Metaspace GC. +MetaWord* ClassLoaderMetaspace::expand_GC_threshold_and_allocate(size_t word_size, bool is_class) { + Metaspace::assert_not_frozen(); + size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); + assert(delta_bytes > 0, "Must be"); + + size_t before = 0; + size_t after = 0; + bool can_retry = true; + MetaWord* res; + bool incremented; + + // Each thread increments the HWM at most once. Even if the thread fails to increment + // the HWM, an allocation is still attempted. This is because another thread must then + // have incremented the HWM and therefore the allocation might still succeed. + do { + incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry); + res = allocate(word_size, is_class); + } while (!incremented && res == NULL && can_retry); + + if (incremented) { + Metaspace::tracer()->report_gc_threshold(before, after, + MetaspaceGCThresholdUpdater::ExpandAndAllocate); + log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after); + } + + return res; +} + +// Prematurely returns a metaspace allocation to the _block_freelists +// because it is not needed anymore. 
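+// Illustrative pairing with allocate() (not part of this patch; "clm" is assumed
+// to be a ClassLoaderMetaspace instance):
+//
+//   MetaWord* p = clm->allocate(word_size, is_class);
+//   ...
+//   clm->deallocate(p, word_size, is_class);   // hand the block back prematurely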
+void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { + + Metaspace::assert_not_frozen(); + DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs)); + + if (is_class && Metaspace::using_class_space()) { + class_space_manager()->deallocate(ptr, word_size); + } else { + non_class_space_manager()->deallocate(ptr, word_size); + } + +} + + +} // end namespace metaspace + + + + --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/classLoaderMetaspace.hpp 2019-07-22 11:08:15.149794573 +0200 @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_MEMORY_METASPACE_CHUNKMANAGER_HPP +#define SHARE_MEMORY_METASPACE_CHUNKMANAGER_HPP + +#include "memory/allocation.hpp" +#include "memory/metaspace/metaspaceCommon.hpp" +#include "memory/metaspace/spaceManager.hpp" + +namespace metaspace { + + +class ClassLoaderMetaspace : public CHeapObj { + + // The CLD lock. + Mutex* const _lock; + + const Metaspace::MetaspaceType _space_type; + + metaspace::SpaceManager* _non_class_space_manager; + metaspace::SpaceManager* _class_space_manager; + + Mutex* lock() const { return _lock; } + metaspace::SpaceManager* non_class_space_manager() const { return _non_class_space_manager; } + metaspace::SpaceManager* class_space_manager() const { return _class_space_manager; } + + metaspace::SpaceManager* get_space_manager(bool is_class) { + return is_class ? class_space_manager() : non_class_space_manager(); + } + +public: + + ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType space_type); + + ~ClassLoaderMetaspace(); + + // Allocate word_size words from Metaspace. + MetaWord* allocate(size_t word_size, bool is_class); + + // Attempt to expand the GC threshold to be good for at least another word_size words + // and allocate. Returns NULL if failure. Used during Metaspace GC. + MetaWord* expand_GC_threshold_and_allocate(size_t word_size, bool is_class); + + // Prematurely returns a metaspace allocation to the _block_freelists + // because it is not needed anymore. + void deallocate(MetaWord* ptr, size_t word_size, bool is_class); + + + DEBUG_ONLY(void verify(bool slow) const;) + +}; + +} // namespace metaspace + +#endif // SHARE_MEMORY_METASPACE_CHUNKMANAGER_HPP --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/commitLimiter.cpp 2019-07-22 11:08:15.669800133 +0200 @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. 
All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "memory/metaspace/commitLimiter.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "utilities/debug.hpp"
+
+namespace metaspace {
+
+// Returns the size, in words, by which we may expand the metaspace committed area without:
+// - _cap == 0: hitting GC threshold or the MaxMetaspaceSize
+// - _cap > 0: hitting cap (this is just for testing purposes)
+size_t CommitLimiter::possible_expansion_words() const {
+
+  if (_cap > 0) { // Testing.
+    assert(_cnt.get() <= _cap, "Beyond limit?");
+    return _cap - _cnt.get();
+  }
+
+  assert(_cnt.get() * BytesPerWord <= MaxMetaspaceSize, "Beyond limit?");
+  const size_t words_left_below_max = MaxMetaspaceSize / BytesPerWord - _cnt.get();
+
+  // (Assumption: only the MaxMetaspaceSize limit is applied here; honoring the GC
+  //  threshold mentioned above is left to the callers.)
+  return words_left_below_max;
+
+}
+
+static CommitLimiter g_global_limiter(0);
+
+// Returns the global metaspace commit counter
+CommitLimiter* CommitLimiter::globalLimiter() { return &g_global_limiter; }
+
+} // namespace metaspace
+
--- /dev/null	2019-07-22 08:07:50.621255384 +0200
+++ new/src/hotspot/share/memory/metaspace/commitLimiter.hpp	2019-07-22 11:08:16.181805608 +0200
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_COMMITLIMITER_HPP
+#define SHARE_MEMORY_METASPACE_COMMITLIMITER_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/metaspace/counter.hpp"
+
+namespace metaspace {
+
+class CommitLimiter : public CHeapObj {
+
+  // Counts total words committed for metaspace
+  SizeCounter _cnt;
+
+  // Purely for testing purposes.
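+  // If a cap was given (_cap > 0), possible_expansion_words() limits committed
+  // growth against that cap instead of MaxMetaspaceSize.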
+ const size_t _cap; + +public: + + CommitLimiter(size_t cap = 0) : _cnt(), _cap(cap) {} + + // Returns the size, in words, by which we may expand the metaspace committed area without: + // - _cap == 0: hitting GC threshold or the MaxMetaspaceSize + // - _cap > 0: hitting cap (this is just for testing purposes) + size_t possible_expansion_words() const; + + void increase_committed(size_t word_size) { _cnt.increment_by(word_size); } + void decrease_committed(size_t word_size) { _cnt.decrement_by(word_size); } + size_t committed_words() const { return _cnt.get(); } + + // Returns the global metaspace commit counter + static CommitLimiter* globalLimiter(); + +}; + +} // namespace metaspace + +#endif // SHARE_MEMORY_METASPACE_COMMITLIMITER_HPP --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/commitMask.cpp 2019-07-22 11:08:16.693811081 +0200 @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + + +#include "precompiled.hpp" + +#include "memory/metaspace/constants.hpp" +#include "memory/metaspace/commitMask.hpp" +#include "memory/metaspace/metaspaceCommon.hpp" +#include "runtime/stubRoutines.hpp" + +#include "utilities/align.hpp" +#include "utilities/debug.hpp" + +namespace metaspace { + +#ifdef ASSERT + +volatile u1 x; + +void CommitMask::verify(bool slow) const { + + // Walk the whole commit mask. + // For each 1 bit, check if the associated granule is accessible. + // For each 0 bit, check if the associated granule is not accessible. Slow mode only. + + assert_is_aligned(_base, constants::commit_granule_bytes); + assert_is_aligned(_word_size, constants::commit_granule_words); + + if (slow) { + assert(CanUseSafeFetch32, "We need SafeFetch for this test."); + } + + for (idx_t i = 0; i < size(); i ++) { + const MetaWord* const p = _base + (i * constants::commit_granule_words); + if (at(i)) { + // Should be accessible. Just touch it. + x ^= (u1*)p; + } else { + // Should not be accessible. + if (slow) { + // Note: results may differ between platforms. On Linux, this should be true since + // we uncommit memory by setting protection to PROT_NONE. We may have to look if + // this works as expected on other platforms. 
+          assert(os::is_readable_pointer(p) == false, "Should not be accessible.");
+        }
+      }
+    }
+
+}
+
+#endif // ASSERT
+
+} // namespace metaspace
+
--- /dev/null 2019-07-22 08:07:50.621255384 +0200
+++ new/src/hotspot/share/memory/metaspace/commitMask.hpp 2019-07-22 11:08:17.205816555 +0200
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_COMMITMASK_HPP
+#define SHARE_MEMORY_METASPACE_COMMITMASK_HPP
+
+#include "memory/metaspace/constants.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/bitMap.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+namespace metaspace {
+
+// A bitmap covering a range of metaspace; each bit in this mask corresponds to
+// one commit granule within that range and tells whether that granule is committed.
+class CommitMask : private CHeapBitMap {
+
+  const MetaWord* const _base;
+  const size_t _word_size;
+
+  // Given an offset, in words, into the area, return the number of the bit
+  // covering it.
+  static idx_t bitno_for_word_offset(size_t offset) {
+    return offset / constants::commit_granule_words;
+  }
+
+  idx_t bitno_for_address(const MetaWord* p) const {
+    assert(p >= _base && p < _base + _word_size, "Invalid address");
+    const size_t off = p - _base;
+    return bitno_for_word_offset(off);
+  }
+
+  static idx_t mask_size(size_t word_size) {
+    assert(is_aligned(word_size, constants::commit_granule_words), "size not aligned correctly.");
+    return bitno_for_word_offset(word_size) + 1;
+  }
+
+  struct BitCounterClosure : public BitMapClosure {
+    idx_t cnt;
+    bool do_bit(BitMap::idx_t offset) { cnt ++; return true; }
+  };
+
+  // Missing from BitMap.
+  // Count 1 bits in range [start, end).
+  idx_t count_one_bits_in_range(idx_t start, idx_t end) const {
+    if (start == end) {
+      return at(start) ? 1 : 0;
+    }
+    // TODO: This can be done more efficiently.
+    BitCounterClosure bcc;
+    bcc.cnt = 0;
+    iterate(&bcc, start, end + 1);
+    return bcc.cnt;
+  }
+
+#ifdef ASSERT
+  // Given a pointer, check if it points into the range this bitmap covers.
+  bool is_pointer_valid(const MetaWord* p) const {
+    return p >= _base && p < _base + _word_size;
+  }
+  // Given a pointer, check if it points into the range this bitmap covers,
+  // and if it is aligned to commit granule border.
+  bool is_pointer_valid_and_aligned(const MetaWord* p) const {
+    return is_pointer_valid(p) && is_aligned(p, constants::commit_granule_bytes);
+  }
+  // Given a pointer, check if it points into the range this bitmap covers.
+  void check_pointer(const MetaWord* p) const {
+    assert(is_pointer_valid(p),
+           "Pointer " PTR_FORMAT " not in range of this bitmap [" PTR_FORMAT ", " PTR_FORMAT ").",
+           p2i(p), p2i(_base), p2i(_base + _word_size));
+  }
+  // Given a pointer, check if it points into the range this bitmap covers,
+  // and if it is aligned to commit granule border.
+  void check_pointer_aligned(const MetaWord* p) const {
+    check_pointer(p);
+    assert(is_aligned(p, constants::commit_granule_bytes),
+           "Pointer " PTR_FORMAT " should be aligned to commit granule size " SIZE_FORMAT ".", p2i(p), constants::commit_granule_bytes);
+  }
+  // Given a range, check if it points into the range this bitmap covers,
+  // and if its borders are aligned to commit granule border.
+  void check_range(const MetaWord* start, size_t word_size) const {
+    check_pointer_aligned(start);
+    check_pointer_aligned(start + word_size);
+  }
+#endif
+
+  // Marks a single commit granule as committed or uncommitted and returns
+  // its prior state.
+  bool mark_granule(idx_t bitno, bool value) {
+    bool b = at(bitno);
+    at_put(bitno, value);
+    return b;
+  }
+
+public:
+
+  CommitMask(const MetaWord* start, size_t word_size)
+    : CHeapBitMap(mask_size(word_size))
+    , _base(start)
+    , _word_size(word_size)
+  {}
+
+  virtual ~CommitMask() {}
+
+  // Given an address, returns true if the address is committed, false if not.
+  bool is_committed_address(const MetaWord* p) const {
+    DEBUG_ONLY(check_pointer(p));
+    const idx_t bitno = bitno_for_address(p);
+    return at(bitno);
+  }
+
+  // Given an address range [start, end), returns true if area is fully committed through.
+  bool is_fully_committed_range(const MetaWord* start, size_t word_size) const {
+    DEBUG_ONLY(check_range(start, word_size));
+    const idx_t b1 = bitno_for_address(start);
+    const idx_t b2 = bitno_for_address(start + word_size);
+    return get_next_zero_offset(b1, b2) == b2;
+  }
+
+  // Given an address range, return size, in number of words, of committed area within that range.
+  size_t get_committed_size_in_range(const MetaWord* start, size_t word_size) const {
+    DEBUG_ONLY(check_range(start, word_size));
+    const idx_t b1 = bitno_for_address(start);
+    const idx_t b2 = bitno_for_address(start + word_size);
+    const idx_t num_bits = count_one_bits_in_range(b1, b2);
+    return num_bits * constants::commit_granule_words;
+  }
+
+  // Return total committed size, in number of words.
+  size_t get_committed_size() const {
+    return count_one_bits() * constants::commit_granule_words;
+  }
+
+  // Mark a whole address range [start, end) as committed.
+  // Return the number of words which had already been committed before this operation.
+  size_t mark_range_as_committed(const MetaWord* start, size_t word_size) {
+    DEBUG_ONLY(check_range(start, word_size));
+    const idx_t b1 = bitno_for_address(start);
+    const idx_t b2 = bitno_for_address(start + word_size);
+    if (b1 == b2) { // Simple case, 1 granule
+      return mark_granule(b1, true) ? constants::commit_granule_words : 0;
+    }
+    const idx_t bits_set_before = count_one_bits_in_range(b1, b2);
+    set_range(b1, b2);
+    return bits_set_before * constants::commit_granule_words;
+  }
+
+  // Mark a whole address range [start, end) as uncommitted.
+  // Return the number of words which had already been uncommitted before this operation.
+  size_t mark_range_as_uncommitted(const MetaWord* start, size_t word_size) {
+    DEBUG_ONLY(check_range(start, word_size));
+    const idx_t b1 = bitno_for_address(start);
+    const idx_t b2 = bitno_for_address(start + word_size);
+    if (b1 == b2) { // Simple case, 1 granule
+      return mark_granule(b1, false) ? 0 : constants::commit_granule_words;
+    }
+    const idx_t bits_set_before = count_one_bits_in_range(b1, b2);
+    clear_range(b1, b2);
+    return ((b2 - b1) - bits_set_before) * constants::commit_granule_words;
+  }
+
+
+  //// Debug stuff ////
+  DEBUG_ONLY(void verify(bool slow) const;)
+
+};
+
+} // namespace metaspace
+
+#endif // SHARE_MEMORY_METASPACE_COMMITMASK_HPP
--- /dev/null 2019-07-22 08:07:50.621255384 +0200
+++ new/src/hotspot/share/memory/metaspace/constants.hpp 2019-07-22 11:08:17.729822155 +0200
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_METASPACE_CONSTANTS_HPP
+#define SHARE_MEMORY_METASPACE_CONSTANTS_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "memory/metaspace/chunkLevel.hpp"
+
+namespace metaspace {
+
+// Constants to be used throughout metaspace
+
+namespace constants {
+
+// Granularity, in bytes, metaspace is committed with.
+static const size_t commit_granule_bytes = K * 64;
+
+// Granularity, in words, metaspace is committed with.
+static const size_t commit_granule_words = commit_granule_bytes / BytesPerWord;
+
+
+// Whether or not to commit new-born chunks thru right after creation.
+static const bool newborn_chunks_are_fully_committed = true;
+
+// When a chunk is handed out by the ChunkManager to a class loader, how much
+// of a chunk should be committed up-front?
+// Size in words. Must be a multiple of commit_granule_words.
+// (Note: 0 is possible but inefficient, since it will cause the ClassLoaderMetaspace
+// to commit the first granule right away anyway, so nothing is saved.
+// chklvl::MAX_CHUNK_WORD_SIZE pretty much means every chunk is committed thru
+// from the start.)
+static const size_t committed_words_on_fresh_chunks = commit_granule_words * 2;
+
+// The default size of a non-class VirtualSpaceNode (unless created differently).
+// Must be a multiple of the root chunk size.
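The constants above fix the commit granule, and the CommitMask earlier keeps one bit per granule. A standalone model of that bookkeeping, assuming 64K granules and 8-byte words as in the patch; std::vector<bool> stands in for CHeapBitMap and all names here are illustrative:

#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

static const size_t bytes_per_word       = 8;
static const size_t commit_granule_bytes = 64 * 1024;
static const size_t commit_granule_words = commit_granule_bytes / bytes_per_word;  // 8192

// One bit per granule over a region of region_words words.
struct ToyCommitMask {
  std::vector<bool> bits;
  explicit ToyCommitMask(size_t region_words)
    : bits(region_words / commit_granule_words, false) {
    assert(region_words % commit_granule_words == 0);
  }
  // Mark [start_word, start_word + num_words) committed; both granule-aligned.
  void mark_committed(size_t start_word, size_t num_words) {
    for (size_t b = start_word / commit_granule_words;
         b < (start_word + num_words) / commit_granule_words; b++) {
      bits[b] = true;
    }
  }
  // Committed size, in words, within a granule-aligned range.
  size_t committed_words_in_range(size_t start_word, size_t num_words) const {
    size_t n = 0;
    for (size_t b = start_word / commit_granule_words;
         b < (start_word + num_words) / commit_granule_words; b++) {
      if (bits[b]) n++;
    }
    return n * commit_granule_words;
  }
};

int main() {
  ToyCommitMask mask(16 * commit_granule_words);      // 16-granule region
  mask.mark_committed(0, 4 * commit_granule_words);   // commit the first 4 granules
  std::cout << mask.committed_words_in_range(0, 16 * commit_granule_words)
            << " words committed\n";                  // 4 * 8192 = 32768
  return 0;
}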
+static const size_t virtual_space_node_default_size = chklvl::MAX_CHUNK_BYTE_SIZE * 4; + +static const size_t allocation_from_dictionary_limit = 4 * K; + +} // namespace constants + +} // namespace metaspace + +#endif // SHARE_MEMORY_METASPACE_BLOCKFREELIST_HPP --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/counter.hpp 2019-07-22 11:08:18.237827585 +0200 @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_MEMORY_METASPACE_COUNTER_HPP +#define SHARE_MEMORY_METASPACE_COUNTER_HPP + +#include "runtime/atomic.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + + +namespace metaspace { + +// A very simple helper class which counts something, offers decrement/increment +// methods and checks for overflow/underflow on increment/decrement. +// +// (since we seem to do that alot....) + +template +class AbstractCounter { + + T _c; + +public: + + AbstractCounter() : _c(0) {} + + T get() const { return _c; } + + void increment() { assert(_c + 1 > _c, "overflow"); _c ++; } + void increment_by(T v) { assert(_c + v >= _c, "overflow"); _c += v; } + void decrement() { assert(_c - 1 < _c, "underflow"); _c --; } + void decrement_by(T v) { assert(_c - v <= _c, "underflow"); _c -= v; } + +#ifdef ASSERT + void check(T expected) const { + assert(_c == expected, "Counter mismatch: %d, expected: %d.", + (int)_c, (int)expected); + } +#endif + +}; + +typedef AbstractCounter SizeCounter; +typedef AbstractCounter IntCounter; + + +template +class AbstractAtomicCounter { + + volatile T _c; + +public: + + AbstractAtomicCounter() : _c(0) {} + + T get() const { return _c; } + + void increment() { assert(_c + 1 > _c, "overflow"); Atomic::inc(&_c); } + void increment_by(T v) { assert(_c + v >= _c, "overflow"); Atomic::add(&_c, v); } + void decrement() { assert(_c - 1 < _c, "underflow"); Atomic::dec(&_c); } + void decrement_by(T v) { assert(_c - v <= _c, "underflow"); Atomic::sub(&_c, v); } + +#ifdef ASSERT + void check(T expected) const { + assert(_c == expected, "Counter mismatch: %d, expected: %d.", + (int)_c, (int)expected); + } +#endif + +}; + +typedef AbstractAtomicCounter SizeAtomicCounter; + + + +} // namespace metaspace + +#endif // SHARE_MEMORY_METASPACE_WORDSIZECOUNTER_HPP + --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/metaspaceGC.cpp 2019-07-22 11:08:18.749833058 +0200 @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" + +#include "logging/log.hpp" +#include "memory/metaspace/metaspaceGC.hpp" +#include "memory/metaspace/metaspaceCommon.hpp" +#include "runtime/atomic.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + + + +namespace metaspace { + +volatile size_t MetaspaceGC::_capacity_until_GC = 0; +uint MetaspaceGC::_shrink_factor = 0; +bool MetaspaceGC::_should_concurrent_collect = false; + + + + + + +// MetaspaceGC methods + +// VM_CollectForMetadataAllocation is the vm operation used to GC. +// Within the VM operation after the GC the attempt to allocate the metadata +// should succeed. If the GC did not free enough space for the metaspace +// allocation, the HWM is increased so that another virtualspace will be +// allocated for the metadata. With perm gen the increase in the perm +// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The +// metaspace policy uses those as the small and large steps for the HWM. +// +// After the GC the compute_new_size() for MetaspaceGC is called to +// resize the capacity of the metaspaces. The current implementation +// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used +// to resize the Java heap by some GC's. New flags can be implemented +// if really needed. MinMetaspaceFreeRatio is used to calculate how much +// free space is desirable in the metaspace capacity to decide how much +// to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much +// free space is desirable in the metaspace capacity before decreasing +// the HWM. + +// Calculate the amount to increase the high water mark (HWM). +// Increase by a minimum amount (MinMetaspaceExpansion) so that +// another expansion is not requested too soon. If that is not +// enough to satisfy the allocation, increase by MaxMetaspaceExpansion. +// If that is still not enough, expand by the size of the allocation +// plus some. +size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) { + size_t min_delta = MinMetaspaceExpansion; + size_t max_delta = MaxMetaspaceExpansion; + size_t delta = align_up(bytes, Metaspace::commit_alignment()); + + if (delta <= min_delta) { + delta = min_delta; + } else if (delta <= max_delta) { + // Don't want to hit the high water mark on the next + // allocation so make the delta greater than just enough + // for this allocation. 
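The step policy of delta_capacity_until_GC is easiest to see with concrete numbers. A standalone sketch, assuming illustrative values of 256K and 4M for MinMetaspaceExpansion and MaxMetaspaceExpansion and leaving out the commit alignment:

#include <cstddef>
#include <iostream>

static const size_t K = 1024;
static const size_t MinMetaspaceExpansion = 256 * K;        // small step (assumed value)
static const size_t MaxMetaspaceExpansion = 4 * 1024 * K;   // large step (assumed value)

// Mirrors the three-way decision in delta_capacity_until_GC.
static size_t expansion_step(size_t requested_bytes) {
  if (requested_bytes <= MinMetaspaceExpansion) {
    return MinMetaspaceExpansion;                     // never expand by less than the small step
  } else if (requested_bytes <= MaxMetaspaceExpansion) {
    return MaxMetaspaceExpansion;                     // medium requests get the large step
  } else {
    return requested_bytes + MinMetaspaceExpansion;   // huge requests get their size plus some slack
  }
}

int main() {
  std::cout << expansion_step(10 * K)        << "\n"   // -> 262144   (256K)
            << expansion_step(1024 * K)      << "\n"   // -> 4194304  (4M)
            << expansion_step(16 * 1024 * K) << "\n";  // -> 17039360 (16M + 256K)
  return 0;
}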
+ delta = max_delta; + } else { + // This allocation is large but the next ones are probably not + // so increase by the minimum. + delta = delta + min_delta; + } + + assert_is_aligned(delta, Metaspace::commit_alignment()); + + return delta; +} + +size_t MetaspaceGC::capacity_until_GC() { + size_t value = OrderAccess::load_acquire(&_capacity_until_GC); + assert(value >= MetaspaceSize, "Not initialized properly?"); + return value; +} + +// Try to increase the _capacity_until_GC limit counter by v bytes. +// Returns true if it succeeded. It may fail if either another thread +// concurrently increased the limit or the new limit would be larger +// than MaxMetaspaceSize. +// On success, optionally returns new and old metaspace capacity in +// new_cap_until_GC and old_cap_until_GC respectively. +// On error, optionally sets can_retry to indicate whether if there is +// actually enough space remaining to satisfy the request. +bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) { + assert_is_aligned(v, Metaspace::commit_alignment()); + + size_t old_capacity_until_GC = _capacity_until_GC; + size_t new_value = old_capacity_until_GC + v; + + if (new_value < old_capacity_until_GC) { + // The addition wrapped around, set new_value to aligned max value. + new_value = align_down(max_uintx, Metaspace::commit_alignment()); + } + + if (new_value > MaxMetaspaceSize) { + if (can_retry != NULL) { + *can_retry = false; + } + return false; + } + + if (can_retry != NULL) { + *can_retry = true; + } + size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC); + + if (old_capacity_until_GC != prev_value) { + return false; + } + + if (new_cap_until_GC != NULL) { + *new_cap_until_GC = new_value; + } + if (old_cap_until_GC != NULL) { + *old_cap_until_GC = old_capacity_until_GC; + } + return true; +} + +size_t MetaspaceGC::dec_capacity_until_GC(size_t v) { + assert_is_aligned(v, Metaspace::commit_alignment()); + + return Atomic::sub(v, &_capacity_until_GC); +} + +void MetaspaceGC::initialize() { + // Set the high-water mark to MaxMetapaceSize during VM initializaton since + // we can't do a GC during initialization. + _capacity_until_GC = MaxMetaspaceSize; +} + +void MetaspaceGC::post_initialize() { + // Reset the high-water mark once the VM initialization is done. + _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize); +} + +bool MetaspaceGC::can_expand(size_t word_size, bool is_class) { + // Check if the compressed class space is full. + if (is_class && Metaspace::using_class_space()) { + size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType); + if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) { + log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)", + (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord)); + return false; + } + } + + // Check if the user has imposed a limit on the metaspace memory. + size_t committed_bytes = MetaspaceUtils::committed_bytes(); + if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) { + log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)", + (is_class ? 
"class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord)); + return false; + } + + return true; +} + +size_t MetaspaceGC::allowed_expansion() { + size_t committed_bytes = MetaspaceUtils::committed_bytes(); + size_t capacity_until_gc = capacity_until_GC(); + + assert(capacity_until_gc >= committed_bytes, + "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT, + capacity_until_gc, committed_bytes); + + size_t left_until_max = MaxMetaspaceSize - committed_bytes; + size_t left_until_GC = capacity_until_gc - committed_bytes; + size_t left_to_commit = MIN2(left_until_GC, left_until_max); + log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT + " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".", + left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord); + + return left_to_commit / BytesPerWord; +} + +void MetaspaceGC::compute_new_size() { + assert(_shrink_factor <= 100, "invalid shrink factor"); + uint current_shrink_factor = _shrink_factor; + _shrink_factor = 0; + + // Using committed_bytes() for used_after_gc is an overestimation, since the + // chunk free lists are included in committed_bytes() and the memory in an + // un-fragmented chunk free list is available for future allocations. + // However, if the chunk free lists becomes fragmented, then the memory may + // not be available for future allocations and the memory is therefore "in use". + // Including the chunk free lists in the definition of "in use" is therefore + // necessary. Not including the chunk free lists can cause capacity_until_GC to + // shrink below committed_bytes() and this has caused serious bugs in the past. + const size_t used_after_gc = MetaspaceUtils::committed_bytes(); + const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); + + const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0; + const double maximum_used_percentage = 1.0 - minimum_free_percentage; + + const double min_tmp = used_after_gc / maximum_used_percentage; + size_t minimum_desired_capacity = + (size_t)MIN2(min_tmp, double(MaxMetaspaceSize)); + // Don't shrink less than the initial generation size + minimum_desired_capacity = MAX2(minimum_desired_capacity, + MetaspaceSize); + + log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: "); + log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f", + minimum_free_percentage, maximum_used_percentage); + log_trace(gc, metaspace)(" used_after_gc : %6.1fKB", used_after_gc / (double) K); + + + size_t shrink_bytes = 0; + if (capacity_until_GC < minimum_desired_capacity) { + // If we have less capacity below the metaspace HWM, then + // increment the HWM. 
+ size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; + expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment()); + // Don't expand unless it's significant + if (expand_bytes >= MinMetaspaceExpansion) { + size_t new_capacity_until_GC = 0; + bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC); + assert(succeeded, "Should always succesfully increment HWM when at safepoint"); + + Metaspace::tracer()->report_gc_threshold(capacity_until_GC, + new_capacity_until_GC, + MetaspaceGCThresholdUpdater::ComputeNewSize); + log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB", + minimum_desired_capacity / (double) K, + expand_bytes / (double) K, + MinMetaspaceExpansion / (double) K, + new_capacity_until_GC / (double) K); + } + return; + } + + // No expansion, now see if we want to shrink + // We would never want to shrink more than this + assert(capacity_until_GC >= minimum_desired_capacity, + SIZE_FORMAT " >= " SIZE_FORMAT, + capacity_until_GC, minimum_desired_capacity); + size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity; + + // Should shrinking be considered? + if (MaxMetaspaceFreeRatio < 100) { + const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0; + const double minimum_used_percentage = 1.0 - maximum_free_percentage; + const double max_tmp = used_after_gc / minimum_used_percentage; + size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize)); + maximum_desired_capacity = MAX2(maximum_desired_capacity, + MetaspaceSize); + log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f", + maximum_free_percentage, minimum_used_percentage); + log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB", + minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K); + + assert(minimum_desired_capacity <= maximum_desired_capacity, + "sanity check"); + + if (capacity_until_GC > maximum_desired_capacity) { + // Capacity too large, compute shrinking size + shrink_bytes = capacity_until_GC - maximum_desired_capacity; + // We don't want shrink all the way back to initSize if people call + // System.gc(), because some programs do that between "phases" and then + // we'd just have to grow the heap up again for the next phase. So we + // damp the shrinking: 0% on the first call, 10% on the second call, 40% + // on the third call, and 100% by the fourth call. But if we recompute + // size without shrinking, it goes back to 0%. 
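The damping described in the comment above comes from _shrink_factor, which starts at 0 and is quadrupled (capped at 100) on each consecutive shrink decision. A small standalone sketch of that progression:

#include <algorithm>
#include <iostream>

int main() {
  // Models the damping in compute_new_size(): the percentage of the possible shrink
  // actually applied grows 0% -> 10% -> 40% -> 100% over consecutive shrink decisions,
  // and resets to 0% once a recomputation happens without shrinking.
  unsigned shrink_factor = 0;
  for (int call = 1; call <= 5; call++) {
    const unsigned applied_percent = shrink_factor;  // factor used for this call
    shrink_factor = (shrink_factor == 0) ? 10
                                         : std::min(shrink_factor * 4, 100u);  // for the next call
    std::cout << "call " << call << ": shrink " << applied_percent << "% of the excess\n";
  }
  return 0;
}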
+ shrink_bytes = shrink_bytes / 100 * current_shrink_factor; + + shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment()); + + assert(shrink_bytes <= max_shrink_bytes, + "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, + shrink_bytes, max_shrink_bytes); + if (current_shrink_factor == 0) { + _shrink_factor = 10; + } else { + _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); + } + log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK", + MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); + log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK", + shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); + } + } + + // Don't shrink unless it's significant + if (shrink_bytes >= MinMetaspaceExpansion && + ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { + size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); + Metaspace::tracer()->report_gc_threshold(capacity_until_GC, + new_capacity_until_GC, + MetaspaceGCThresholdUpdater::ComputeNewSize); + } +} + + + +} // namespace metaspace + + --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/metaspaceGC.hpp 2019-07-22 11:08:19.261838529 +0200 @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_MEMORY_METASPACE_METASPACEGC_HPP +#define SHARE_MEMORY_METASPACE_METASPACEGC_HPP + +#include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" + +namespace metaspace { + +// Metaspace are deallocated when their class loader are GC'ed. +// This class implements a policy for inducing GC's to recover +// Metaspaces. + +class MetaspaceGC : public AllStatic { + + // The current high-water-mark for inducing a GC. + // When committed memory of all metaspaces reaches this value, + // a GC is induced and the value is increased. Size is in bytes. + static volatile size_t _capacity_until_GC; + + // For a CMS collection, signal that a concurrent collection should + // be started. 
+ static bool _should_concurrent_collect; + + static uint _shrink_factor; + + static size_t shrink_factor() { return _shrink_factor; } + void set_shrink_factor(uint v) { _shrink_factor = v; } + + public: + + static void initialize(); + static void post_initialize(); + + static size_t capacity_until_GC(); + static bool inc_capacity_until_GC(size_t v, + size_t* new_cap_until_GC = NULL, + size_t* old_cap_until_GC = NULL, + bool* can_retry = NULL); + static size_t dec_capacity_until_GC(size_t v); + + static bool should_concurrent_collect() { return _should_concurrent_collect; } + static void set_should_concurrent_collect(bool v) { + _should_concurrent_collect = v; + } + + // The amount to increase the high-water-mark (_capacity_until_GC) + static size_t delta_capacity_until_GC(size_t bytes); + + // Tells if we have can expand metaspace without hitting set limits. + static bool can_expand(size_t words, bool is_class); + + // Returns amount that we can expand without hitting a GC, + // measured in words. + static size_t allowed_expansion(); + + // Calculate the new high-water mark at which to induce + // a GC. + static void compute_new_size(); +}; + +} // namespace metaspace + +#endif // SHARE_MEMORY_METASPACE_METASPACEGC_HPP --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/metaspaceReport.cpp 2019-07-22 11:08:19.777844043 +0200 @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP and/or its affiliates. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +#include "precompiled.hpp" +#include "memory/metaspace/metaspaceCommon.hpp" +#include "memory/metaspace/metaspaceDCmd.hpp" +#include "memory/resourceArea.hpp" +#include "services/diagnosticCommand.hpp" +#include "services/nmtCommon.hpp" + +namespace metaspace { + +static const char* space_type_name(Metaspace::MetaspaceType t) { + const char* s = NULL; + switch (t) { + case Metaspace::StandardMetaspaceType: s = "Standard"; break; + case Metaspace::BootMetaspaceType: s = "Boot"; break; + case Metaspace::UnsafeAnonymousMetaspaceType: s = "UnsafeAnonymous"; break; + case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break; + default: ShouldNotReachHere(); + } + return s; +} + +static void print_vs(outputStream* out, size_t scale) { + const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord); + const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord); + { + if (Metaspace::using_class_space()) { + out->print(" Non-class space: "); + } + print_scaled_words(out, reserved_nonclass_words, scale, 7); + out->print(" reserved, "); + print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7); + out->print_cr(" committed "); + + if (Metaspace::using_class_space()) { + const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord); + const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord); + out->print(" Class space: "); + print_scaled_words(out, reserved_class_words, scale, 7); + out->print(" reserved, "); + print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7); + out->print_cr(" committed "); + + const size_t reserved_words = reserved_nonclass_words + reserved_class_words; + const size_t committed_words = committed_nonclass_words + committed_class_words; + out->print(" Both: "); + print_scaled_words(out, reserved_words, scale, 7); + out->print(" reserved, "); + print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7); + out->print_cr(" committed "); + } + } +} + +static void print_basic_switches(outputStream* out, size_t scale) { + out->print("MaxMetaspaceSize: "); + if (MaxMetaspaceSize >= (max_uintx) - (2 * os::vm_page_size())) { + // aka "very big". Default is max_uintx, but due to rounding in arg parsing the real + // value is smaller. + out->print("unlimited"); + } else { + print_human_readable_size(out, MaxMetaspaceSize, scale); + } + out->cr(); + if (Metaspace::using_class_space()) { + out->print("CompressedClassSpaceSize: "); + print_human_readable_size(out, CompressedClassSpaceSize, scale); + } + out->cr(); +} + +// This will print out a basic metaspace usage report but +// unlike print_report() is guaranteed not to lock or to walk the CLDG. +void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) { + + if (!Metaspace::initialized()) { + out->print_cr("Metaspace not yet initialized."); + return; + } + + out->cr(); + out->print_cr("Usage:"); + + if (Metaspace::using_class_space()) { + out->print(" Non-class: "); + } + + // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from + // MetaspaceUtils. 
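The basic report derives its free+waste column purely from the running totals, exactly as the code below it does: free+waste = capacity - overhead - used. A tiny worked example with made-up numbers:

#include <cstddef>
#include <iostream>

int main() {
  // Illustrative values, in words.
  const size_t capacity_words = 100000;  // words in chunks handed out to loaders
  const size_t used_words     =  80000;  // words actually holding metadata
  const size_t overhead_words =   2000;  // chunk headers and similar overhead
  const size_t free_and_waste = capacity_words - overhead_words - used_words;
  std::cout << free_and_waste << " words free+waste\n";  // 18000
  return 0;
}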
+ const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType); + const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType); + const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType); + const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc; + + print_scaled_words(out, cap_nc, scale, 5); + out->print(" capacity, "); + print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5); + out->print(" used, "); + print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5); + out->print(" free+waste, "); + print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5); + out->print(" overhead. "); + out->cr(); + + if (Metaspace::using_class_space()) { + const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType); + const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType); + const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType); + const size_t free_and_waste_c = cap_c - overhead_c - used_c; + out->print(" Class: "); + print_scaled_words(out, cap_c, scale, 5); + out->print(" capacity, "); + print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5); + out->print(" used, "); + print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5); + out->print(" free+waste, "); + print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5); + out->print(" overhead. "); + out->cr(); + + out->print(" Both: "); + const size_t cap = cap_nc + cap_c; + + print_scaled_words(out, cap, scale, 5); + out->print(" capacity, "); + print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5); + out->print(" used, "); + print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5); + out->print(" free+waste, "); + print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5); + out->print(" overhead. "); + out->cr(); + } + + out->cr(); + out->print_cr("Virtual space:"); + + print_vs(out, scale); + + out->cr(); + out->print_cr("Chunk freelists:"); + + if (Metaspace::using_class_space()) { + out->print(" Non-Class: "); + } + print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale); + out->cr(); + if (Metaspace::using_class_space()) { + out->print(" Class: "); + print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes(), scale); + out->cr(); + out->print(" Both: "); + print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes() + + Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale); + out->cr(); + } + + out->cr(); + + // Print basic settings + print_basic_switches(out, scale); + + out->cr(); + +} + +void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) { + + if (!Metaspace::initialized()) { + out->print_cr("Metaspace not yet initialized."); + return; + } + + const bool print_loaders = (flags & rf_show_loaders) > 0; + const bool print_classes = (flags & rf_show_classes) > 0; + const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0; + const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0; + + // Some report options require walking the class loader data graph. 
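Callers select the level of detail by OR-ing the rf_* flags defined in metaspaceReport.hpp further down in this patch; print_report then tests each bit as shown above. A minimal standalone sketch of composing and decoding such a flag set:

#include <iostream>

// Same flag values as the patch's metaspaceReport.hpp.
enum ReportFlag {
  rf_show_loaders            = (1 << 0),
  rf_break_down_by_chunktype = (1 << 1),
  rf_break_down_by_spacetype = (1 << 2),
  rf_show_vslist             = (1 << 3),
  rf_show_vsmap              = (1 << 4),
  rf_show_classes            = (1 << 5)
};

int main() {
  // A caller asking for per-loader output broken down by chunk type:
  const int flags = rf_show_loaders | rf_break_down_by_chunktype;

  // print_report() decodes the flags with the same bit tests:
  const bool print_loaders      = (flags & rf_show_loaders) > 0;             // true
  const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;  // true
  const bool print_vsmap        = (flags & rf_show_vsmap) > 0;               // false

  std::cout << print_loaders << print_by_chunktype << print_vsmap << "\n";   // 110
  return 0;
}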
+ PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_classes, print_by_chunktype); + if (print_loaders) { + out->cr(); + out->print_cr("Usage per loader:"); + out->cr(); + } + + ClassLoaderDataGraph::loaded_cld_do(&cl); // collect data and optionally print + + // Print totals, broken up by space type. + if (print_by_spacetype) { + out->cr(); + out->print_cr("Usage per space type:"); + out->cr(); + for (int space_type = (int)Metaspace::ZeroMetaspaceType; + space_type < (int)Metaspace::MetaspaceTypeCount; space_type ++) + { + uintx num_loaders = cl._num_loaders_by_spacetype[space_type]; + uintx num_classes = cl._num_classes_by_spacetype[space_type]; + out->print("%s - " UINTX_FORMAT " %s", + space_type_name((Metaspace::MetaspaceType)space_type), + num_loaders, loaders_plural(num_loaders)); + if (num_classes > 0) { + out->print(", "); + print_number_of_classes(out, num_classes, cl._num_classes_shared_by_spacetype[space_type]); + out->print(":"); + cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype); + } else { + out->print("."); + out->cr(); + } + out->cr(); + } + } + + // Print totals for in-use data: + out->cr(); + { + uintx num_loaders = cl._num_loaders; + out->print("Total Usage - " UINTX_FORMAT " %s, ", + num_loaders, loaders_plural(num_loaders)); + print_number_of_classes(out, cl._num_classes, cl._num_classes_shared); + out->print(":"); + cl._stats_total.print_on(out, scale, print_by_chunktype); + out->cr(); + } + + // -- Print Virtual space. + out->cr(); + out->print_cr("Virtual space:"); + + print_vs(out, scale); + + // -- Print VirtualSpaceList details. + if ((flags & rf_show_vslist) > 0) { + out->cr(); + out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : ""); + + if (Metaspace::using_class_space()) { + out->print_cr(" Non-Class:"); + } + Metaspace::space_list()->print_on(out, scale); + if (Metaspace::using_class_space()) { + out->print_cr(" Class:"); + Metaspace::class_space_list()->print_on(out, scale); + } + } + out->cr(); + + // -- Print VirtualSpaceList map. + if ((flags & rf_show_vsmap) > 0) { + out->cr(); + out->print_cr("Virtual space map:"); + + if (Metaspace::using_class_space()) { + out->print_cr(" Non-Class:"); + } + Metaspace::space_list()->print_map(out); + if (Metaspace::using_class_space()) { + out->print_cr(" Class:"); + Metaspace::class_space_list()->print_map(out); + } + } + out->cr(); + + // -- Print Freelists (ChunkManager) details + out->cr(); + out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : ""); + + ChunkManagerStatistics non_class_cm_stat; + Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat); + + if (Metaspace::using_class_space()) { + out->print_cr(" Non-Class:"); + } + non_class_cm_stat.print_on(out, scale); + + if (Metaspace::using_class_space()) { + ChunkManagerStatistics class_cm_stat; + Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat); + out->print_cr(" Class:"); + class_cm_stat.print_on(out, scale); + } + + // As a convenience, print a summary of common waste. + out->cr(); + out->print("Waste "); + // For all wastages, print percentages from total. As total use the total size of memory committed for metaspace. 
+ const size_t committed_words = committed_bytes() / BytesPerWord; + + out->print("(percentages refer to total committed size "); + print_scaled_words(out, committed_words, scale); + out->print_cr("):"); + + // Print space committed but not yet used by any class loader + const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord; + out->print(" Committed unused: "); + print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6); + out->cr(); + + // Print waste for in-use chunks. + UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals(); + UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals(); + UsedChunksStatistics ucs_all; + ucs_all.add(ucs_nonclass); + ucs_all.add(ucs_class); + + out->print(" Waste in chunks in use: "); + print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6); + out->cr(); + out->print(" Free in chunks in use: "); + print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6); + out->cr(); + out->print(" Overhead in chunks in use: "); + print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6); + out->cr(); + + // Print waste in free chunks. + const size_t total_capacity_in_free_chunks = + Metaspace::chunk_manager_metadata()->free_chunks_total_words() + + (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0); + out->print(" In free chunks: "); + print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6); + out->cr(); + + // Print waste in deallocated blocks. + const uintx free_blocks_num = + cl._stats_total.nonclass_sm_stats().free_blocks_num() + + cl._stats_total.class_sm_stats().free_blocks_num(); + const size_t free_blocks_cap_words = + cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() + + cl._stats_total.class_sm_stats().free_blocks_cap_words(); + out->print("Deallocated from chunks in use: "); + print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6); + out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num); + out->cr(); + + // Print total waste. 
+ const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks + + free_blocks_cap_words + unused_words_in_vs; + out->print(" -total-: "); + print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6); + out->cr(); + + // Print internal statistics +#ifdef ASSERT + out->cr(); + out->cr(); + out->print_cr("Internal statistics:"); + out->cr(); + out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs); + out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births); + out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths); + out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created); + out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged); + out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded); + out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs); + out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks); + out->print_cr("Number of chunks added to freelist: " UINTX_FORMAT ".", + g_internal_statistics.num_chunks_added_to_freelist); + out->print_cr("Number of chunks removed from freelist: " UINTX_FORMAT ".", + g_internal_statistics.num_chunks_removed_from_freelist); + out->print_cr("Number of chunk merges: " UINTX_FORMAT ", split-ups: " UINTX_FORMAT ".", + g_internal_statistics.num_chunk_merges, g_internal_statistics.num_chunk_splits); + + out->cr(); +#endif + + // Print some interesting settings + out->cr(); + out->cr(); + print_basic_switches(out, scale); + + out->cr(); + out->print("InitialBootClassLoaderMetaspaceSize: "); + print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale); + + out->cr(); + out->cr(); + +} // MetaspaceUtils::print_report() + +} // namespace metaspace + --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/metaspaceReport.hpp 2019-07-22 11:08:20.289849514 +0200 @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_MEMORY_METASPACE_METASPACEREPORT_HPP +#define SHARE_MEMORY_METASPACE_METASPACEREPORT_HPP + +#include "memory/allocation.hpp" + +namespace metaspace { + +class MetaspaceReporter : public AllStatic { +public: + + // Flags for print_report(). + enum ReportFlag { + // Show usage by class loader. + rf_show_loaders = (1 << 0), + // Breaks report down by chunk type (small, medium, ...). + rf_break_down_by_chunktype = (1 << 1), + // Breaks report down by space type (anonymous, reflection, ...). + rf_break_down_by_spacetype = (1 << 2), + // Print details about the underlying virtual spaces. + rf_show_vslist = (1 << 3), + // Print metaspace map. + rf_show_vsmap = (1 << 4), + // If show_loaders: show loaded classes for each loader. + rf_show_classes = (1 << 5) + }; + + // This will print out a basic metaspace usage report but + // unlike print_report() is guaranteed not to lock or to walk the CLDG. + static void print_basic_report(outputStream* st, size_t scale); + + // Prints a report about the current metaspace state. + // Optional parts can be enabled via flags. + // Function will walk the CLDG and will lock the expand lock; if that is not + // convenient, use print_basic_report() instead. + static void print_report(outputStream* out, size_t scale = 0, int flags = 0); + +}; + +} // namespace metaspace + +#endif // SHARE_MEMORY_METASPACE_METASPACEREPORT_HPP --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/runningCounters.cpp 2019-07-22 11:08:20.805855027 +0200 @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "memory/metaspace/counter.hpp" +#include "memory/metaspace/runningCounters.hpp" +#include "memory/metaspace/virtualSpaceList.hpp" +#include "memory/metaspace/chunkManager.hpp" + +namespace metaspace { + +// Return reserved size, in words, for Metaspace +size_t RunningCounters::reserved_words() { + return reserved_words_class() + reserved_words_nonclass(); +} + +size_t RunningCounters::reserved_words_class() { + VirtualSpaceList* vs = VirtualSpaceList::vslist_class(); + return vs != NULL ? 
vs->reserved_words() : 0; +} + +size_t RunningCounters::reserved_words_nonclass() { + return VirtualSpaceList::vslist_nonclass()->reserved_words(); +} + +// Return total committed size, in words, for Metaspace +size_t RunningCounters::committed_words() { + return committed_words_class() + committed_words_nonclass(); +} + +size_t RunningCounters::committed_words_class() { + VirtualSpaceList* vs = VirtualSpaceList::vslist_class(); + return vs != NULL ? vs->committed_words() : 0; +} + +size_t RunningCounters::committed_words_nonclass() { + return VirtualSpaceList::vslist_nonclass()->committed_words(); +} + + +// ---- used chunks ----- + +// Returns size, in words, used for metadata. +size_t RunningCounters::used_words() { + return used_words_class() + used_words_nonclass(); +} + +size_t RunningCounters::used_words_class() { + return _used_class.get(); +} + +size_t RunningCounters::used_words_nonclass() { + return _used_nonclass.get(); +} + +// ---- free chunks ----- + +// Returns size, in words, of all chunks in all freelists. +size_t RunningCounters::free_chunks_words() { + return free_chunks_words_class() + free_chunks_words_nonclass(); +} + +size_t RunningCounters::free_chunks_words_class() { + ChunkManager* cm = ChunkManager::chunkmanager_class(); + return cm != NULL ? cm->total_word_size() : 0; +} + +size_t RunningCounters::free_chunks_words_nonclass() { + return ChunkManager::chunkmanager_nonclass()->total_word_size(); +} + +} // namespace metaspace + + --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/src/hotspot/share/memory/metaspace/runningCounters.hpp 2019-07-22 11:08:21.341860753 +0200 @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_MEMORY_METASPACE_RUNNINGCOUNTERS_HPP +#define SHARE_MEMORY_METASPACE_RUNNINGCOUNTERS_HPP + +#include "memory/allocation.hpp" +#include "memory/metaspace/counter.hpp" + +namespace metaspace { + +class ClassLoaderMetaspace; + +// These are running counters for some basic Metaspace statistics. +// Their value can be obtained quickly without locking. + +class RunningCounters : public AllStatic { + + friend class ClassLoaderMetaspace; + + // ---- in-use chunks ---- + + // Used space, in words. + // (Note that the used counter is on the hot path of Metaspace allocation. + // Do we really need it? We may get by with capacity only and get more details + // with get_statistics_slow().) 
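RunningCounters keeps these totals in atomic counters precisely so monitoring code can read them without taking the metaspace lock. A minimal standalone model of that pattern using std::atomic; the names and the allocation hook are illustrative, not the patch's API:

#include <atomic>
#include <cstddef>
#include <iostream>

// Global, lock-free running totals in the spirit of RunningCounters:
// allocation paths bump them, readers query them without locking.
static std::atomic<size_t> g_used_words_nonclass{0};
static std::atomic<size_t> g_used_words_class{0};

static void note_allocation(size_t word_size, bool is_class) {
  (is_class ? g_used_words_class : g_used_words_nonclass)
      .fetch_add(word_size, std::memory_order_relaxed);
}

static size_t used_words() {
  return g_used_words_class.load(std::memory_order_relaxed) +
         g_used_words_nonclass.load(std::memory_order_relaxed);
}

int main() {
  note_allocation(128, /*is_class=*/false);
  note_allocation(64,  /*is_class=*/true);
  std::cout << used_words() << " words used\n";  // 192
  return 0;
}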
+ SizeAtomicCounter _used_class; + SizeAtomicCounter _used_nonclass; + +public: + + // ---- virtual memory ----- + + // Return reserved size, in words, for Metaspace + static size_t reserved_words(); + static size_t reserved_words_class(); + static size_t reserved_words_nonclass(); + + // Return total committed size, in words, for Metaspace + static size_t committed_words(); + static size_t committed_words_class(); + static size_t committed_words_nonclass(); + + + // ---- used chunks ----- + + // Returns size, in words, used for metadata. + static size_t used_words(); + static size_t used_words_class(); + static size_t used_words_nonclass(); + + // ---- free chunks ----- + + // Returns size, in words, of all chunks in all freelists. + static size_t free_chunks_words(); + static size_t free_chunks_words_class(); + static size_t free_chunks_words_nonclass(); + + +}; + +} // namespace metaspace + +#endif // SHARE_MEMORY_METASPACE_RUNNINGCOUNTERS_HPP + + --- /dev/null 2019-07-22 08:07:50.621255384 +0200 +++ new/test/hotspot/gtest/metaspace/test_pool.cpp 2019-07-22 11:08:21.857866266 +0200 @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2019, SAP. All rights reserved. + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" + +#include "memory/allocation.hpp" +#include "memory/metaspace/abstractPool.hpp" +#include "runtime/os.hpp" + +#include "unittest.hpp" + +// Test AbstractPool class + +template +class AbstractPoolTest { + + typedef metaspace::AbstractPool PoolType; + + void test_exhaustion() { + PoolType pool; + for (I i = 0; i < max_size + 10; i ++) { + E* e = pool.allocate_element(); + if (i < max_size) { + ASSERT_EQUALS(e != NULL); + ASSERT_EQUALS(pool.get_used(), i); + } else { + ASSERT_EQUALS(e == NULL); + ASSERT_EQUALS(pool.get_used(), max_size); + } + } + ASSERT_EQUALS(pool.memory_footprint() == max_size * sizeof(E)); + } + + void random_alloc_free() { + PoolType pool; + E* elems = NEW_C_HEAP_ARRAY(E*, max_size, mtInternal); + int allocated = 0; + for (int iter = 0; iter < 1000; iter ++) { + I idx = (I)os::random() % max_size; + if (elems[idx] == NULL) { + elems[idx] = pool.allocate_element(); + } else { + pool.return_element(elems[idx]); + } + if ((os::random() & 1) > 0) { + + } else + for (I i = 0; i < max_size; i ++) { + + } + } + FREE_C_HEAP_ARRAY(elems); + } + +public: + + void do_test() { + test_exhaustion(); + } + + +}; + + --- old/src/hotspot/share/memory/metaspace/occupancyMap.cpp 2019-07-22 11:08:22.613874341 +0200 +++ /dev/null 2019-07-22 08:07:50.621255384 +0200 @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2018 SAP SE. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" -#include "memory/metaspace/metachunk.hpp" -#include "memory/metaspace/occupancyMap.hpp" -#include "runtime/os.hpp" - -namespace metaspace { - -OccupancyMap::OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) : - _reference_address(reference_address), _word_size(word_size), - _smallest_chunk_word_size(smallest_chunk_word_size) -{ - assert(reference_address != NULL, "invalid reference address"); - assert(is_aligned(reference_address, smallest_chunk_word_size), - "Reference address not aligned to smallest chunk size."); - assert(is_aligned(word_size, smallest_chunk_word_size), - "Word_size shall be a multiple of the smallest chunk size."); - // Calculate bitmap size: one bit per smallest_chunk_word_size'd area. 
- size_t num_bits = word_size / smallest_chunk_word_size; - _map_size = (num_bits + 7) / 8; - assert(_map_size * 8 >= num_bits, "sanity"); - _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal); - _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal); - assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed."); - memset(_map[1], 0, _map_size); - memset(_map[0], 0, _map_size); - // Sanity test: the first respectively last possible chunk start address in - // the covered range shall map to the first and last bit in the bitmap. - assert(get_bitpos_for_address(reference_address) == 0, - "First chunk address in range must map to fist bit in bitmap."); - assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1, - "Last chunk address in range must map to last bit in bitmap."); -} - -OccupancyMap::~OccupancyMap() { - os::free(_map[0]); - os::free(_map[1]); -} - -#ifdef ASSERT -// Verify occupancy map for the address range [from, to). -// We need to tell it the address range, because the memory the -// occupancy map is covering may not be fully comitted yet. -void OccupancyMap::verify(MetaWord* from, MetaWord* to) { - Metachunk* chunk = NULL; - int nth_bit_for_chunk = 0; - MetaWord* chunk_end = NULL; - for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) { - const unsigned pos = get_bitpos_for_address(p); - // Check the chunk-starts-info: - if (get_bit_at_position(pos, layer_chunk_start_map)) { - // Chunk start marked in bitmap. - chunk = (Metachunk*) p; - if (chunk_end != NULL) { - assert(chunk_end == p, "Unexpected chunk start found at %p (expected " - "the next chunk to start at %p).", p, chunk_end); - } - assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p); - if (chunk->get_chunk_type() != HumongousIndex) { - guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p); - } - chunk_end = p + chunk->word_size(); - nth_bit_for_chunk = 0; - assert(chunk_end <= to, "Chunk end overlaps test address range."); - } else { - // No chunk start marked in bitmap. - assert(chunk != NULL, "Chunk should start at start of address range."); - assert(p < chunk_end, "Did not find expected chunk start at %p.", p); - nth_bit_for_chunk ++; - } - // Check the in-use-info: - const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map); - if (in_use_bit) { - assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).", - chunk, nth_bit_for_chunk); - } else { - assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).", - chunk, nth_bit_for_chunk); - } - } -} - -// Verify that a given chunk is correctly accounted for in the bitmap. -void OccupancyMap::verify_for_chunk(Metachunk* chunk) { - assert(chunk_starts_at_address((MetaWord*) chunk), - "No chunk start marked in map for chunk %p.", chunk); - // For chunks larger than the minimal chunk size, no other chunk - // must start in its area. 
- if (chunk->word_size() > _smallest_chunk_word_size) { - assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size, - chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map), - "No chunk must start within another chunk."); - } - if (!chunk->is_tagged_free()) { - assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()), - "Chunk %p is in use but marked as free in map (%d %d).", - chunk, chunk->get_chunk_type(), chunk->get_origin()); - } else { - assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()), - "Chunk %p is free but marked as in-use in map (%d %d).", - chunk, chunk->get_chunk_type(), chunk->get_origin()); - } -} - -#endif // ASSERT - -} // namespace metaspace - - --- old/src/hotspot/share/memory/metaspace/occupancyMap.hpp 2019-07-22 11:08:23.001878487 +0200 +++ /dev/null 2019-07-22 08:07:50.621255384 +0200 @@ -1,242 +0,0 @@ -/* - * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2018 SAP SE. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_MEMORY_METASPACE_OCCUPANCYMAP_HPP -#define SHARE_MEMORY_METASPACE_OCCUPANCYMAP_HPP - -#include "memory/allocation.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" - - -namespace metaspace { - -class Metachunk; - -// Helper for Occupancy Bitmap. A type trait to give an all-bits-are-one-unsigned constant. -template struct all_ones { static const T value; }; -template <> struct all_ones { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; }; -template <> struct all_ones { static const uint32_t value = 0xFFFFFFFF; }; - -// The OccupancyMap is a bitmap which, for a given VirtualSpaceNode, -// keeps information about -// - where a chunk starts -// - whether a chunk is in-use or free -// A bit in this bitmap represents one range of memory in the smallest -// chunk size (SpecializedChunk or ClassSpecializedChunk). -class OccupancyMap : public CHeapObj { - - // The address range this map covers. - const MetaWord* const _reference_address; - const size_t _word_size; - - // The word size of a specialized chunk, aka the number of words one - // bit in this map represents. - const size_t _smallest_chunk_word_size; - - // map data - // Data are organized in two bit layers: - // The first layer is the chunk-start-map. Here, a bit is set to mark - // the corresponding region as the head of a chunk. - // The second layer is the in-use-map. Here, a set bit indicates that - // the corresponding belongs to a chunk which is in use. 
- uint8_t* _map[2]; - - enum { layer_chunk_start_map = 0, layer_in_use_map = 1 }; - - // length, in bytes, of bitmap data - size_t _map_size; - - // Returns true if bit at position pos at bit-layer layer is set. - bool get_bit_at_position(unsigned pos, unsigned layer) const { - assert(layer == 0 || layer == 1, "Invalid layer %d", layer); - const unsigned byteoffset = pos / 8; - assert(byteoffset < _map_size, - "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size); - const unsigned mask = 1 << (pos % 8); - return (_map[layer][byteoffset] & mask) > 0; - } - - // Changes bit at position pos at bit-layer layer to value v. - void set_bit_at_position(unsigned pos, unsigned layer, bool v) { - assert(layer == 0 || layer == 1, "Invalid layer %d", layer); - const unsigned byteoffset = pos / 8; - assert(byteoffset < _map_size, - "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size); - const unsigned mask = 1 << (pos % 8); - if (v) { - _map[layer][byteoffset] |= mask; - } else { - _map[layer][byteoffset] &= ~mask; - } - } - - // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access: - // pos is 32/64 aligned and num_bits is 32/64. - // This is the typical case when coalescing to medium chunks, whose size is - // 32 or 64 times the specialized chunk size (depending on class or non class - // case), so they occupy 64 bits which should be 64bit aligned, because - // chunks are chunk-size aligned. - template - bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const { - assert(_map_size > 0, "not initialized"); - assert(layer == 0 || layer == 1, "Invalid layer %d.", layer); - assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos); - assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits); - const size_t byteoffset = pos / 8; - assert(byteoffset <= (_map_size - sizeof(T)), - "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size); - const T w = *(T*)(_map[layer] + byteoffset); - return w > 0 ? true : false; - } - - // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer. - bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const { - if (pos % 32 == 0 && num_bits == 32) { - return is_any_bit_set_in_region_3264(pos, num_bits, layer); - } else if (pos % 64 == 0 && num_bits == 64) { - return is_any_bit_set_in_region_3264(pos, num_bits, layer); - } else { - for (unsigned n = 0; n < num_bits; n ++) { - if (get_bit_at_position(pos + n, layer)) { - return true; - } - } - } - return false; - } - - // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer. - bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const { - assert(word_size % _smallest_chunk_word_size == 0, - "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size); - const unsigned pos = get_bitpos_for_address(p); - const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size); - return is_any_bit_set_in_region(pos, num_bits, layer); - } - - // Optimized case of set_bits_of_region for 32/64bit aligned access: - // pos is 32/64 aligned and num_bits is 32/64. - // This is the typical case when coalescing to medium chunks, whose size - // is 32 or 64 times the specialized chunk size (depending on class or non - // class case), so they occupy 64 bits which should be 64bit aligned, - // because chunks are chunk-size aligned. 
- template - void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) { - assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).", - (unsigned)(sizeof(T) * 8), pos); - assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.", - num_bits, (unsigned)(sizeof(T) * 8)); - const size_t byteoffset = pos / 8; - assert(byteoffset <= (_map_size - sizeof(T)), - "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size); - T* const pw = (T*)(_map[layer] + byteoffset); - *pw = v ? all_ones::value : (T) 0; - } - - // Set all bits in a region starting at pos to a value. - void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) { - assert(_map_size > 0, "not initialized"); - assert(layer == 0 || layer == 1, "Invalid layer %d.", layer); - if (pos % 32 == 0 && num_bits == 32) { - set_bits_of_region_T(pos, num_bits, layer, v); - } else if (pos % 64 == 0 && num_bits == 64) { - set_bits_of_region_T(pos, num_bits, layer, v); - } else { - for (unsigned n = 0; n < num_bits; n ++) { - set_bit_at_position(pos + n, layer, v); - } - } - } - - // Helper: sets all bits in a region [p, p+word_size). - void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) { - assert(word_size % _smallest_chunk_word_size == 0, - "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size); - const unsigned pos = get_bitpos_for_address(p); - const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size); - set_bits_of_region(pos, num_bits, layer, v); - } - - // Helper: given an address, return the bit position representing that address. - unsigned get_bitpos_for_address(const MetaWord* p) const { - assert(_reference_address != NULL, "not initialized"); - assert(p >= _reference_address && p < _reference_address + _word_size, - "Address %p out of range for occupancy map [%p..%p).", - p, _reference_address, _reference_address + _word_size); - assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)), - "Address not aligned (%p).", p); - const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size; - assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity."); - return (unsigned) d; - } - - public: - - OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size); - ~OccupancyMap(); - - // Returns true if at address x a chunk is starting. - bool chunk_starts_at_address(MetaWord* p) const { - const unsigned pos = get_bitpos_for_address(p); - return get_bit_at_position(pos, layer_chunk_start_map); - } - - void set_chunk_starts_at_address(MetaWord* p, bool v) { - const unsigned pos = get_bitpos_for_address(p); - set_bit_at_position(pos, layer_chunk_start_map, v); - } - - // Removes all chunk-start-bits inside a region, typically as a - // result of a chunk merge. - void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) { - set_bits_of_region(p, word_size, layer_chunk_start_map, false); - } - - // Returns true if there are life (in use) chunks in the region limited - // by [p, p+word_size). - bool is_region_in_use(MetaWord* p, size_t word_size) const { - return is_any_bit_set_in_region(p, word_size, layer_in_use_map); - } - - // Marks the region starting at p with the size word_size as in use - // or free, depending on v. 
- void set_region_in_use(MetaWord* p, size_t word_size, bool v) { - set_bits_of_region(p, word_size, layer_in_use_map, v); - } - - // Verify occupancy map for the address range [from, to). - // We need to tell it the address range, because the memory the - // occupancy map is covering may not be fully comitted yet. - DEBUG_ONLY(void verify(MetaWord* from, MetaWord* to);) - - // Verify that a given chunk is correctly accounted for in the bitmap. - DEBUG_ONLY(void verify_for_chunk(Metachunk* chunk);) - -}; - -} // namespace metaspace - -#endif // SHARE_MEMORY_METASPACE_OCCUPANCYMAP_HPP
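
For orientation, the bit-indexing scheme of the OccupancyMap removed above is small enough to sketch in isolation: one bit covers one smallest-chunk-sized range of words starting at a fixed reference address, and two parallel bit layers record chunk starts and in-use state. The following is a minimal, self-contained C++ sketch of that mapping only; it uses plain calloc/assert instead of HotSpot's os::malloc and assert macros, and word indices instead of MetaWord pointers. It is an illustration of the scheme, not the removed class itself.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Two-layer occupancy bitmap: layer 0 marks chunk starts, layer 1 marks
// in-use ranges. One bit covers 'smallest_chunk' words of the range
// [reference, reference + word_size), given here as word indices.
struct SimpleOccupancyMap {
  uintptr_t reference;      // first covered word index
  size_t word_size;         // number of covered words
  size_t smallest_chunk;    // words covered by one bit
  size_t map_size;          // bytes per layer
  uint8_t* map[2];

  SimpleOccupancyMap(uintptr_t ref, size_t words, size_t smallest)
    : reference(ref), word_size(words), smallest_chunk(smallest) {
    const size_t num_bits = word_size / smallest_chunk;
    map_size = (num_bits + 7) / 8;              // round up to whole bytes
    map[0] = (uint8_t*) calloc(map_size, 1);    // chunk-start layer
    map[1] = (uint8_t*) calloc(map_size, 1);    // in-use layer
  }
  ~SimpleOccupancyMap() { free(map[0]); free(map[1]); }

  // Word index -> bit position: one bit per smallest_chunk words.
  unsigned bitpos_for(uintptr_t p) const {
    assert(p >= reference && p < reference + word_size);
    return (unsigned)((p - reference) / smallest_chunk);
  }
  bool get_bit(unsigned pos, int layer) const {
    return (map[layer][pos / 8] & (1u << (pos % 8))) != 0;
  }
  void set_bit(unsigned pos, int layer, bool v) {
    const uint8_t mask = (uint8_t)(1u << (pos % 8));
    if (v) { map[layer][pos / 8] |= mask; } else { map[layer][pos / 8] &= (uint8_t)~mask; }
  }
};

int main() {
  // 1024 words covered, 8 words per bit -> 128 bits = 16 bytes per layer.
  SimpleOccupancyMap om(0, 1024, 8);
  unsigned pos = om.bitpos_for(64);   // chunk starting at word 64 -> bit 8
  om.set_bit(pos, 0, true);           // mark chunk start
  om.set_bit(pos, 1, true);           // mark its range as in use
  assert(om.get_bit(8, 0) && om.get_bit(8, 1));
  return 0;
}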