/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/metaspaceGC.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen, the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC, compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
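
// Worked example for delta_capacity_until_GC() above, with purely illustrative
// flag values (not necessarily the defaults): assuming
// MinMetaspaceExpansion = 256K, MaxMetaspaceExpansion = 4M and a commit
// alignment no larger than 64K, a 64K request is rounded up to the 256K
// minimum step, a 1M request takes the 4M large step, and an 8M request
// yields 8M plus the 256K minimum.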

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns the new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != NULL) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != NULL) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}

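// A hypothetical caller sketch for inc_capacity_until_GC() (illustrative
// only, not code from this file): since the cmpxchg above can lose a race,
// callers typically retry while can_retry remains true, e.g.
//
//   size_t new_cap, old_cap;
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap, &old_cap, &can_retry)) {
//     if (!can_retry) {
//       break;  // The new limit would exceed MaxMetaspaceSize; a GC is
//               // needed instead of a further HWM increase.
//     }
//   }

// Atomically lower the GC threshold by v bytes; returns the new value.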
size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

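// Returns true if the metaspace may grow by word_size words without exceeding
// either CompressedClassSpaceSize (for class-space allocations) or
// MaxMetaspaceSize.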
bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}

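// Returns the number of words by which the metaspace may still grow before a
// GC is triggered: the headroom below both capacity_until_GC() and
// MaxMetaspaceSize.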
size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}

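// Recompute the GC threshold after a collection: expand it when less than
// MinMetaspaceFreeRatio of the capacity is free, and shrink it (damped via
// _shrink_factor) when more than MaxMetaspaceFreeRatio is free.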
void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);
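  // Worked example (illustrative values): with MinMetaspaceFreeRatio = 40 and
  // used_after_gc = 60M, the minimum desired capacity is
  // 60M / (1.0 - 0.40) = 100M, i.e. just large enough that 40% of it is free.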

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);

  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
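    // Worked example (illustrative values): with MaxMetaspaceFreeRatio = 70
    // and used_after_gc = 60M, the maximum desired capacity is
    // 60M / (1.0 - 0.70) = 200M; any capacity above that is a shrink
    // candidate.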
    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

} // namespace metaspace