
src/hotspot/share/memory/metaspace.cpp

 115     // allocation so make the delta greater than just enough
 116     // for this allocation.
 117     delta = max_delta;
 118   } else {
 119     // This allocation is large but the next ones are probably not
 120     // so increase by the minimum.
 121     delta = delta + min_delta;
 122   }
 123 
 124   assert_is_aligned(delta, Metaspace::commit_alignment());
 125 
 126   return delta;
 127 }
 128 
 129 size_t MetaspaceGC::capacity_until_GC() {
 130   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
 131   assert(value >= MetaspaceSize, "Not initialized properly?");
 132   return value;
 133 }
 134 
 135 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {






 136   assert_is_aligned(v, Metaspace::commit_alignment());
 137 
 138   size_t old_capacity_until_GC = _capacity_until_GC;
 139   size_t new_value = old_capacity_until_GC + v;
 140 
 141   if (new_value < old_capacity_until_GC) {
 142     // The addition wrapped around, set new_value to aligned max value.
 143     new_value = align_down(max_uintx, Metaspace::commit_alignment());
 144   }
 145 










 146   size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);
 147 
 148   if (old_capacity_until_GC != prev_value) {
 149     return false;
 150   }
 151 
 152   if (new_cap_until_GC != NULL) {
 153     *new_cap_until_GC = new_value;
 154   }
 155   if (old_cap_until_GC != NULL) {
 156     *old_cap_until_GC = old_capacity_until_GC;
 157   }
 158   return true;
 159 }
 160 
 161 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
 162   assert_is_aligned(v, Metaspace::commit_alignment());
 163 
 164   return Atomic::sub(v, &_capacity_until_GC);
 165 }
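inc_capacity_until_GC() above makes exactly one compare-and-exchange attempt and leaves any retrying to its caller. The following standalone model is an editor's sketch of that contract using std::atomic; it is not part of this change, the names CapacityModel and try_raise are illustrative only, and std::atomic differs from HotSpot's Atomic in memory-ordering details.

    #include <atomic>
    #include <cstddef>

    struct CapacityModel {
      std::atomic<size_t> capacity{0};

      // One CAS attempt, mirroring inc_capacity_until_GC(): a false return means
      // the update lost the race to a concurrent writer and was not applied.
      bool try_raise(size_t v, size_t* new_cap, size_t* old_cap) {
        size_t old_value = capacity.load();
        size_t new_value = old_value + v;
        if (!capacity.compare_exchange_strong(old_value, new_value)) {
          return false;
        }
        if (new_cap != nullptr) { *new_cap = new_value; }
        if (old_cap != nullptr) { *old_cap = old_value; }
        return true;
      }
    };

ClassLoaderMetaspace::expand_and_allocate() further down is the real caller and handles a false return by retrying the allocation rather than the raise.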


 219   assert(_shrink_factor <= 100, "invalid shrink factor");
 220   uint current_shrink_factor = _shrink_factor;
 221   _shrink_factor = 0;
 222 
 223   // Using committed_bytes() for used_after_gc is an overestimation, since the
 224   // chunk free lists are included in committed_bytes() and the memory in an
 225   // un-fragmented chunk free list is available for future allocations.
 226   // However, if the chunk free lists become fragmented, then the memory may
 227   // not be available for future allocations and the memory is therefore "in use".
 228   // Including the chunk free lists in the definition of "in use" is therefore
 229   // necessary. Not including the chunk free lists can cause capacity_until_GC to
 230   // shrink below committed_bytes() and this has caused serious bugs in the past.
 231   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
 232   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 233 
 234   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
 235   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
 236 
 237   const double min_tmp = used_after_gc / maximum_used_percentage;
 238   size_t minimum_desired_capacity =
 239     (size_t)MIN2(min_tmp, double(max_uintx));
 240   // Don't shrink less than the initial generation size
 241   minimum_desired_capacity = MAX2(minimum_desired_capacity,
 242                                   MetaspaceSize);
 243 
 244   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
 245   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
 246                            minimum_free_percentage, maximum_used_percentage);
 247   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
 248 
 249 
 250   size_t shrink_bytes = 0;
 251   if (capacity_until_GC < minimum_desired_capacity) {
 252     // If the capacity below the metaspace HWM is less than the
 253     // minimum desired capacity, then increment the HWM.
 254     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
 255     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
 256     // Don't expand unless it's significant
 257     if (expand_bytes >= MinMetaspaceExpansion) {
 258       size_t new_capacity_until_GC = 0;
 259       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);


 266                                minimum_desired_capacity / (double) K,
 267                                expand_bytes / (double) K,
 268                                MinMetaspaceExpansion / (double) K,
 269                                new_capacity_until_GC / (double) K);
 270     }
 271     return;
 272   }
 273 
 274   // No expansion, now see if we want to shrink
 275   // We would never want to shrink more than this
 276   assert(capacity_until_GC >= minimum_desired_capacity,
 277          SIZE_FORMAT " >= " SIZE_FORMAT,
 278          capacity_until_GC, minimum_desired_capacity);
 279   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
 280 
 281   // Should shrinking be considered?
 282   if (MaxMetaspaceFreeRatio < 100) {
 283     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
 284     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
 285     const double max_tmp = used_after_gc / minimum_used_percentage;
 286     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
 287     maximum_desired_capacity = MAX2(maximum_desired_capacity,
 288                                     MetaspaceSize);
 289     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
 290                              maximum_free_percentage, minimum_used_percentage);
 291     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
 292                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
 293 
 294     assert(minimum_desired_capacity <= maximum_desired_capacity,
 295            "sanity check");
 296 
 297     if (capacity_until_GC > maximum_desired_capacity) {
 298       // Capacity too large, compute shrinking size
 299       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
 300       // We don't want to shrink all the way back to initSize if people call
 301       // System.gc(), because some programs do that between "phases" and then
 302       // we'd just have to grow the heap up again for the next phase.  So we
 303       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
 304       // on the third call, and 100% by the fourth call.  But if we recompute
 305       // size without shrinking, it goes back to 0%.
 306       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
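As a worked example of the expansion math above (all values are hypothetical and chosen only for round numbers; MetaspaceSize is assumed to be smaller than the result):

    used_after_gc            = 60 MB   (committed bytes after the GC)
    MinMetaspaceFreeRatio    = 40
    maximum_used_percentage  = 1.0 - 40/100.0 = 0.60
    minimum_desired_capacity = 60 MB / 0.60 = 100 MB
    capacity_until_GC        = 80 MB  ->  expand_bytes = 20 MB, aligned up to the
                               commit alignment and applied only if it is at least
                               MinMetaspaceExpansion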


1453 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
1454   Metaspace::assert_not_frozen();
1455 
1456   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
1457 
1458   // Don't use class_vsm() unless UseCompressedClassPointers is true.
1459   if (Metaspace::is_class_space_allocation(mdtype)) {
1460     return  class_vsm()->allocate(word_size);
1461   } else {
1462     return  vsm()->allocate(word_size);
1463   }
1464 }
1465 
1466 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
1467   Metaspace::assert_not_frozen();
1468   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
1469   assert(delta_bytes > 0, "Must be");
1470 
1471   size_t before = 0;
1472   size_t after = 0;

1473   MetaWord* res;
1474   bool incremented;
1475 
1476   // Each thread increments the HWM at most once. Even if the thread fails to increment
1477   // the HWM, an allocation is still attempted. This is because another thread must then
1478   // have incremented the HWM and therefore the allocation might still succeed.
1479   do {
1480     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
1481     res = allocate(word_size, mdtype);
1482   } while (!incremented && res == NULL);
1483 
1484   if (incremented) {
1485     Metaspace::tracer()->report_gc_threshold(before, after,
1486                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
1487     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
1488   }
1489 
1490   return res;
1491 }
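To make the comment above concrete, here is one possible interleaving (an editor's illustration, not part of the change):

    thread A: inc_capacity_until_GC(delta_bytes, ...) -> loses the cmpxchg race,
              incremented = false
    thread B: inc_capacity_until_GC(delta_bytes, ...) -> wins, the HWM is raised
    thread A: allocate(word_size, mdtype)             -> may now succeed under the
              HWM that thread B raised
    thread A: loop test !incremented && res == NULL   -> false once res != NULL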
1492 
1493 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
1494   return (vsm()->used_words() +
1495       (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
1496 }
1497 
1498 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
1499   return (vsm()->capacity_words() +
1500       (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
1501 }
1502 




 115     // allocation so make the delta greater than just enough
 116     // for this allocation.
 117     delta = max_delta;
 118   } else {
 119     // This allocation is large but the next ones are probably not
 120     // so increase by the minimum.
 121     delta = delta + min_delta;
 122   }
 123 
 124   assert_is_aligned(delta, Metaspace::commit_alignment());
 125 
 126   return delta;
 127 }
 128 
 129 size_t MetaspaceGC::capacity_until_GC() {
 130   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
 131   assert(value >= MetaspaceSize, "Not initialized properly?");
 132   return value;
 133 }
 134 
 135 // Try to increase the metaspace capacity by v bytes. Returns true on
 136 // success, or false if a competing thread updated the capacity first or
 137 // the new capacity would exceed MaxMetaspaceSize. Optionally returns the
 138 // new and old metaspace capacity in new_cap_until_GC and old_cap_until_GC
 139 // respectively. Optionally sets can_retry to indicate whether there is
 140 // actually enough space remaining to satisfy the request.
 141 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
 142   assert_is_aligned(v, Metaspace::commit_alignment());
 143 
 144   size_t old_capacity_until_GC = _capacity_until_GC;
 145   size_t new_value = old_capacity_until_GC + v;
 146 
 147   if (new_value < old_capacity_until_GC) {
 148     // The addition wrapped around, set new_value to aligned max value.
 149     new_value = align_down(max_uintx, Metaspace::commit_alignment());
 150   }
 151 
 152   if (new_value > MaxMetaspaceSize) {
 153     if (can_retry != NULL) {
 154       *can_retry = false;
 155     }
 156     return false;
 157   }
 158 
 159   if (can_retry != NULL) {
 160     *can_retry = true;
 161   }
 162   size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);
 163 
 164   if (old_capacity_until_GC != prev_value) {
 165     return false;
 166   }
 167 
 168   if (new_cap_until_GC != NULL) {
 169     *new_cap_until_GC = new_value;
 170   }
 171   if (old_cap_until_GC != NULL) {
 172     *old_cap_until_GC = old_capacity_until_GC;
 173   }
 174   return true;
 175 }
 176 
 177 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
 178   assert_is_aligned(v, Metaspace::commit_alignment());
 179 
 180   return Atomic::sub(v, &_capacity_until_GC);
 181 }
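The new check against MaxMetaspaceSize above short-circuits before the cmpxchg is ever attempted. A small worked example of the updated inc_capacity_until_GC() (values hypothetical):

    MaxMetaspaceSize    = 256 MB
    _capacity_until_GC  = 252 MB
    v                   =   8 MB
    new_value           = 260 MB  >  MaxMetaspaceSize
        -> *can_retry is set to false and the function returns false
           without attempting the cmpxchg; retrying cannot help here.

A false return with *can_retry left true, by contrast, only means another thread won the cmpxchg race.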


 235   assert(_shrink_factor <= 100, "invalid shrink factor");
 236   uint current_shrink_factor = _shrink_factor;
 237   _shrink_factor = 0;
 238 
 239   // Using committed_bytes() for used_after_gc is an overestimation, since the
 240   // chunk free lists are included in committed_bytes() and the memory in an
 241   // un-fragmented chunk free list is available for future allocations.
 242   // However, if the chunk free lists become fragmented, then the memory may
 243   // not be available for future allocations and the memory is therefore "in use".
 244   // Including the chunk free lists in the definition of "in use" is therefore
 245   // necessary. Not including the chunk free lists can cause capacity_until_GC to
 246   // shrink below committed_bytes() and this has caused serious bugs in the past.
 247   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
 248   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 249 
 250   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
 251   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
 252 
 253   const double min_tmp = used_after_gc / maximum_used_percentage;
 254   size_t minimum_desired_capacity =
 255     (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
 256   // Don't shrink less than the initial generation size
 257   minimum_desired_capacity = MAX2(minimum_desired_capacity,
 258                                   MetaspaceSize);
 259 
 260   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
 261   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
 262                            minimum_free_percentage, maximum_used_percentage);
 263   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
 264 
 265 
 266   size_t shrink_bytes = 0;
 267   if (capacity_until_GC < minimum_desired_capacity) {
 268     // If the capacity below the metaspace HWM is less than the
 269     // minimum desired capacity, then increment the HWM.
 270     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
 271     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
 272     // Don't expand unless it's significant
 273     if (expand_bytes >= MinMetaspaceExpansion) {
 274       size_t new_capacity_until_GC = 0;
 275       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);


 282                                minimum_desired_capacity / (double) K,
 283                                expand_bytes / (double) K,
 284                                MinMetaspaceExpansion / (double) K,
 285                                new_capacity_until_GC / (double) K);
 286     }
 287     return;
 288   }
 289 
 290   // No expansion, now see if we want to shrink
 291   // We would never want to shrink more than this
 292   assert(capacity_until_GC >= minimum_desired_capacity,
 293          SIZE_FORMAT " >= " SIZE_FORMAT,
 294          capacity_until_GC, minimum_desired_capacity);
 295   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
 296 
 297   // Should shrinking be considered?
 298   if (MaxMetaspaceFreeRatio < 100) {
 299     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
 300     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
 301     const double max_tmp = used_after_gc / minimum_used_percentage;
 302     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
 303     maximum_desired_capacity = MAX2(maximum_desired_capacity,
 304                                     MetaspaceSize);
 305     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
 306                              maximum_free_percentage, minimum_used_percentage);
 307     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
 308                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
 309 
 310     assert(minimum_desired_capacity <= maximum_desired_capacity,
 311            "sanity check");
 312 
 313     if (capacity_until_GC > maximum_desired_capacity) {
 314       // Capacity too large, compute shrinking size
 315       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
 316       // We don't want to shrink all the way back to initSize if people call
 317       // System.gc(), because some programs do that between "phases" and then
 318       // we'd just have to grow the heap up again for the next phase.  So we
 319       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
 320       // on the third call, and 100% by the fourth call.  But if we recompute
 321       // size without shrinking, it goes back to 0%.
 322       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
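Note that both desired capacities are now clamped to MaxMetaspaceSize rather than max_uintx, matching the new limit check in inc_capacity_until_GC(). A worked example of the shrink side (values hypothetical; MetaspaceSize assumed smaller than the result):

    used_after_gc            = 60 MB
    MaxMetaspaceFreeRatio    = 70
    minimum_used_percentage  = 1.0 - 70/100.0 = 0.30
    maximum_desired_capacity = 60 MB / 0.30 = 200 MB
    capacity_until_GC        = 260 MB  ->  shrink_bytes = 60 MB before damping
    current_shrink_factor    = 10      ->  shrink_bytes = 60 MB / 100 * 10 = 6 MB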


1469 MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
1470   Metaspace::assert_not_frozen();
1471 
1472   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));
1473 
1474   // Don't use class_vsm() unless UseCompressedClassPointers is true.
1475   if (Metaspace::is_class_space_allocation(mdtype)) {
1476     return  class_vsm()->allocate(word_size);
1477   } else {
1478     return  vsm()->allocate(word_size);
1479   }
1480 }
1481 
1482 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
1483   Metaspace::assert_not_frozen();
1484   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
1485   assert(delta_bytes > 0, "Must be");
1486 
1487   size_t before = 0;
1488   size_t after = 0;
1489   bool can_retry = true;
1490   MetaWord* res;
1491   bool incremented;
1492 
1493   // Each thread increments the HWM at most once. Even if the thread fails to increment
1494   // the HWM, an allocation is still attempted. This is because another thread must then
1495   // have incremented the HWM and therefore the allocation might still succeed.
1496   do {
1497     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry);
1498     res = allocate(word_size, mdtype);
1499   } while (!incremented && res == NULL && can_retry);
1500 
1501   if (incremented) {
1502     Metaspace::tracer()->report_gc_threshold(before, after,
1503                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
1504     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
1505   }
1506 
1507   return res;
1508 }
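One way to read the new loop condition, assuming the capacity is already pinned at MaxMetaspaceSize (an editor's trace, not part of the change):

    inc_capacity_until_GC(...)  ->  new_value would exceed MaxMetaspaceSize,
                                    so incremented = false and can_retry = false
    allocate(word_size, mdtype) ->  NULL, no committed space is left
    loop test: !incremented && res == NULL && can_retry  ->  false, loop exits

With the previous condition (!incremented && res == NULL) alone, a thread in this situation could keep retrying even though the limit can no longer be raised; can_retry lets it fall out of the loop and return NULL to its caller.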
1509 
1510 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
1511   return (vsm()->used_words() +
1512       (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
1513 }
1514 
1515 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
1516   return (vsm()->capacity_words() +
1517       (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
1518 }
1519 

