
src/hotspot/share/memory/metaspace.cpp


 132   return value;
 133 }
 134 
 135 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
 136   assert_is_aligned(v, Metaspace::commit_alignment());
 137 
 138   size_t old_capacity_until_GC = _capacity_until_GC;
 139   size_t new_value = old_capacity_until_GC + v;
 140 
 141   if (new_value < old_capacity_until_GC) {
 142     // The addition wrapped around, set new_value to aligned max value.
 143     new_value = align_down(max_uintx, Metaspace::commit_alignment());
 144   }
 145 
 146   size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);
 147 
 148   if (old_capacity_until_GC != prev_value) {
 149     return false;
 150   }
 151 






 152   if (new_cap_until_GC != NULL) {
 153     *new_cap_until_GC = new_value;
 154   }
 155   if (old_cap_until_GC != NULL) {
 156     *old_cap_until_GC = old_capacity_until_GC;
 157   }
 158   return true;
 159 }
 160 
 161 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
 162   assert_is_aligned(v, Metaspace::commit_alignment());
 163 
 164   return Atomic::sub(v, &_capacity_until_GC);
 165 }
 166 
 167 void MetaspaceGC::initialize() {
 168   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
 169   // we can't do a GC during initialization.
 170   _capacity_until_GC = MaxMetaspaceSize;
 171 }
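
For readers unfamiliar with the pattern in inc_capacity_until_GC above, here is a minimal standalone sketch of the same compare-and-exchange increment with the overflow clamp. It uses std::atomic rather than HotSpot's Atomic wrapper, and the alignment constant and names are assumptions for illustration only; a false return means another thread changed the shared value first and the caller decides whether to retry.

#include <atomic>
#include <cstddef>
#include <cstdint>

// Illustrative stand-ins for _capacity_until_GC and Metaspace::commit_alignment().
static std::atomic<size_t> g_capacity_until_gc{0};
static const size_t kCommitAlignment = 64 * 1024;   // assumed value

static size_t align_down_to(size_t value, size_t alignment) {
  return value - (value % alignment);
}

// Same shape as MetaspaceGC::inc_capacity_until_GC: read the current value,
// compute the incremented value (clamped if the addition wraps around), then
// attempt a single compare-exchange. Returns false if another thread won the race.
bool inc_capacity_until_gc(size_t v, size_t* new_cap, size_t* old_cap) {
  size_t old_value = g_capacity_until_gc.load();
  size_t new_value = old_value + v;
  if (new_value < old_value) {
    // Unsigned addition wrapped around; clamp to the largest aligned value.
    new_value = align_down_to(SIZE_MAX, kCommitAlignment);
  }
  if (!g_capacity_until_gc.compare_exchange_strong(old_value, new_value)) {
    return false;   // lost the race; caller may retry with a fresh read
  }
  if (new_cap != nullptr) { *new_cap = new_value; }
  if (old_cap != nullptr) { *old_cap = old_value; }
  return true;
}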


 219   assert(_shrink_factor <= 100, "invalid shrink factor");
 220   uint current_shrink_factor = _shrink_factor;
 221   _shrink_factor = 0;
 222 
 223   // Using committed_bytes() for used_after_gc is an overestimation, since the
 224   // chunk free lists are included in committed_bytes() and the memory in an
 225   // un-fragmented chunk free list is available for future allocations.
 226   // However, if the chunk free lists become fragmented, then the memory may
 227   // not be available for future allocations and the memory is therefore "in use".
 228   // Including the chunk free lists in the definition of "in use" is therefore
 229   // necessary. Not including the chunk free lists can cause capacity_until_GC to
 230   // shrink below committed_bytes() and this has caused serious bugs in the past.
 231   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
 232   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 233 
 234   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
 235   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
 236 
 237   const double min_tmp = used_after_gc / maximum_used_percentage;
 238   size_t minimum_desired_capacity =
 239     (size_t)MIN2(min_tmp, double(max_uintx));
 240   // Don't shrink less than the initial generation size
 241   minimum_desired_capacity = MAX2(minimum_desired_capacity,
 242                                   MetaspaceSize);
 243 
 244   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
 245   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
 246                            minimum_free_percentage, maximum_used_percentage);
 247   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
 248 
 249 
 250   size_t shrink_bytes = 0;
 251   if (capacity_until_GC < minimum_desired_capacity) {
 252     // If we have less capacity below the metaspace HWM, then
 253     // increment the HWM.
 254     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
 255     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
 256     // Don't expand unless it's significant
 257     if (expand_bytes >= MinMetaspaceExpansion) {
 258       size_t new_capacity_until_GC = 0;
 259       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);


 266                                minimum_desired_capacity / (double) K,
 267                                expand_bytes / (double) K,
 268                                MinMetaspaceExpansion / (double) K,
 269                                new_capacity_until_GC / (double) K);
 270     }
 271     return;
 272   }
 273 
 274   // No expansion, now see if we want to shrink
 275   // We would never want to shrink more than this
 276   assert(capacity_until_GC >= minimum_desired_capacity,
 277          SIZE_FORMAT " >= " SIZE_FORMAT,
 278          capacity_until_GC, minimum_desired_capacity);
 279   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
 280 
 281   // Should shrinking be considered?
 282   if (MaxMetaspaceFreeRatio < 100) {
 283     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
 284     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
 285     const double max_tmp = used_after_gc / minimum_used_percentage;
 286     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
 287     maximum_desired_capacity = MAX2(maximum_desired_capacity,
 288                                     MetaspaceSize);
 289     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
 290                              maximum_free_percentage, minimum_used_percentage);
 291     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
 292                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
 293 
 294     assert(minimum_desired_capacity <= maximum_desired_capacity,
 295            "sanity check");
 296 
 297     if (capacity_until_GC > maximum_desired_capacity) {
 298       // Capacity too large, compute shrinking size
 299       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
 300       // We don't want to shrink all the way back to initSize if people call
 301       // System.gc(), because some programs do that between "phases" and then
 302       // we'd just have to grow the heap up again for the next phase.  So we
 303       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
 304       // on the third call, and 100% by the fourth call.  But if we recompute
 305       // size without shrinking, it goes back to 0%.
 306       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
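
As a worked example of the ratio arithmetic above (all numbers are illustrative, not taken from the source): with used_after_gc of 60 MB and MinMetaspaceFreeRatio of 40, maximum_used_percentage is 0.6 and minimum_desired_capacity is 60 MB / 0.6 = 100 MB; with MaxMetaspaceFreeRatio of 70, minimum_used_percentage is 0.3 and maximum_desired_capacity is 60 MB / 0.3 = 200 MB, so a capacity_until_GC of 260 MB leaves 60 MB of potential shrinkage before damping. The standalone sketch below reproduces that arithmetic, including the 0/10/40/100 damping sequence described in the comment.

#include <cstdio>

int main() {
  // Illustrative values; the real inputs are committed bytes and the
  // MinMetaspaceFreeRatio / MaxMetaspaceFreeRatio flags.
  const double MB                 = 1024.0 * 1024.0;
  const double used_after_gc      = 60.0 * MB;
  const double capacity_until_gc  = 260.0 * MB;
  const double min_free_ratio     = 40.0;   // MinMetaspaceFreeRatio
  const double max_free_ratio     = 70.0;   // MaxMetaspaceFreeRatio

  const double minimum_desired = used_after_gc / (1.0 - min_free_ratio / 100.0);  // 100 MB
  const double maximum_desired = used_after_gc / (1.0 - max_free_ratio / 100.0);  // 200 MB
  const double shrink_bytes    = capacity_until_gc - maximum_desired;             //  60 MB

  printf("minimum_desired %.1f MB, maximum_desired %.1f MB\n",
         minimum_desired / MB, maximum_desired / MB);

  // Damping applied on successive shrink decisions without an intervening expand.
  const int shrink_factors[] = { 0, 10, 40, 100 };
  for (int factor : shrink_factors) {
    printf("shrink factor %3d%% -> shrink %.1f MB\n", factor, shrink_bytes / 100.0 * factor / MB);
  }
  return 0;
}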


1460     return  class_vsm()->allocate(word_size);
1461   } else {
1462     return  vsm()->allocate(word_size);
1463   }
1464 }
1465 
1466 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
1467   Metaspace::assert_not_frozen();
1468   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
1469   assert(delta_bytes > 0, "Must be");
1470 
1471   size_t before = 0;
1472   size_t after = 0;
1473   MetaWord* res;
1474   bool incremented;
1475 
1476   // Each thread increments the HWM at most once. Even if the thread fails to increment
1477   // the HWM, an allocation is still attempted. This is because another thread must then
1478   // have incremented the HWM and therefore the allocation might still succeed.
1479   do {



1480     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
1481     res = allocate(word_size, mdtype);
1482   } while (!incremented && res == NULL);
1483 
1484   if (incremented) {
1485     Metaspace::tracer()->report_gc_threshold(before, after,
1486                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
1487     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
1488   }
1489 
1490   return res;
1491 }
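
The do/while loop above depends on one property: if this thread's attempt to raise the high-water mark fails, another thread must have raised it in the meantime, so retrying the allocation can still succeed. Below is a self-contained sketch of that control flow under assumed, simplified stand-ins (a toy atomic HWM and a dummy allocator); it is meant to show the loop's exit conditions, not metaspace behaviour.

#include <atomic>
#include <cstddef>

// Toy state standing in for the metaspace high-water mark and usage counter.
static std::atomic<size_t> g_hwm{1024 * 1024};   // assumed initial capacity-until-GC
static std::atomic<size_t> g_used{0};

// CAS-based raise; returns false when another thread changed g_hwm first.
static bool try_raise_hwm(size_t delta, size_t* after, size_t* before) {
  size_t old_value = g_hwm.load();
  size_t new_value = old_value + delta;
  if (!g_hwm.compare_exchange_strong(old_value, new_value)) {
    return false;
  }
  *before = old_value;
  *after = new_value;
  return true;
}

// Dummy allocator: succeeds only while the usage counter stays under the HWM.
static void* try_allocate(size_t bytes) {
  size_t used = g_used.fetch_add(bytes) + bytes;
  if (used > g_hwm.load()) {
    g_used.fetch_sub(bytes);
    return nullptr;
  }
  return reinterpret_cast<void*>(used);   // non-null token, not a real address
}

// Same control flow as the loop in expand_and_allocate: keep looping only while
// this thread has not raised the HWM itself and the allocation keeps failing.
void* expand_and_allocate_sketch(size_t bytes, size_t delta) {
  size_t before = 0, after = 0;
  bool raised;
  void* result;
  do {
    raised = try_raise_hwm(delta, &after, &before);
    result = try_allocate(bytes);
  } while (!raised && result == nullptr);
  return result;   // may still be null: the raise succeeded but allocation failed anyway
}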
1492 
1493 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
1494   return (vsm()->used_words() +
1495       (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
1496 }
1497 
1498 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
1499   return (vsm()->capacity_words() +




 132   return value;
 133 }
 134 
 135 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
 136   assert_is_aligned(v, Metaspace::commit_alignment());
 137 
 138   size_t old_capacity_until_GC = _capacity_until_GC;
 139   size_t new_value = old_capacity_until_GC + v;
 140 
 141   if (new_value < old_capacity_until_GC) {
 142     // The addition wrapped around, set new_value to aligned max value.
 143     new_value = align_down(max_uintx, Metaspace::commit_alignment());
 144   }
 145 
 146   size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);
 147 
 148   if (old_capacity_until_GC != prev_value) {
 149     return false;
 150   }
 151 
 152   // Check after the increment that we did not go over the maximum.
 153   // We can not do this earlier due to potential races.
 154   assert(new_value <= MaxMetaspaceSize,
 155          "new_value: " SIZE_FORMAT " > MaxMetaspaceSize: " SIZE_FORMAT,
 156          new_value, MaxMetaspaceSize);
 157 
 158   if (new_cap_until_GC != NULL) {
 159     *new_cap_until_GC = new_value;
 160   }
 161   if (old_cap_until_GC != NULL) {
 162     *old_cap_until_GC = old_capacity_until_GC;
 163   }
 164   return true;
 165 }
 166 
 167 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
 168   assert_is_aligned(v, Metaspace::commit_alignment());
 169 
 170   return Atomic::sub(v, &_capacity_until_GC);
 171 }
 172 
 173 void MetaspaceGC::initialize() {
 174   // Set the high-water mark to MaxMetaspaceSize during VM initialization since
 175   // we can't do a GC during initialization.
 176   _capacity_until_GC = MaxMetaspaceSize;
 177 }


 225   assert(_shrink_factor <= 100, "invalid shrink factor");
 226   uint current_shrink_factor = _shrink_factor;
 227   _shrink_factor = 0;
 228 
 229   // Using committed_bytes() for used_after_gc is an overestimation, since the
 230   // chunk free lists are included in committed_bytes() and the memory in an
 231   // un-fragmented chunk free list is available for future allocations.
 232   // However, if the chunk free lists become fragmented, then the memory may
 233   // not be available for future allocations and the memory is therefore "in use".
 234   // Including the chunk free lists in the definition of "in use" is therefore
 235   // necessary. Not including the chunk free lists can cause capacity_until_GC to
 236   // shrink below committed_bytes() and this has caused serious bugs in the past.
 237   const size_t used_after_gc = MetaspaceUtils::committed_bytes();
 238   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 239 
 240   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
 241   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
 242 
 243   const double min_tmp = used_after_gc / maximum_used_percentage;
 244   size_t minimum_desired_capacity =
 245     (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
 246   // Don't shrink less than the initial generation size
 247   minimum_desired_capacity = MAX2(minimum_desired_capacity,
 248                                   MetaspaceSize);
 249 
 250   log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
 251   log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
 252                            minimum_free_percentage, maximum_used_percentage);
 253   log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
 254 
 255 
 256   size_t shrink_bytes = 0;
 257   if (capacity_until_GC < minimum_desired_capacity) {
 258     // If we have less capacity below the metaspace HWM, then
 259     // increment the HWM.
 260     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
 261     expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
 262     // Don't expand unless it's significant
 263     if (expand_bytes >= MinMetaspaceExpansion) {
 264       size_t new_capacity_until_GC = 0;
 265       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);


 272                                minimum_desired_capacity / (double) K,
 273                                expand_bytes / (double) K,
 274                                MinMetaspaceExpansion / (double) K,
 275                                new_capacity_until_GC / (double) K);
 276     }
 277     return;
 278   }
 279 
 280   // No expansion, now see if we want to shrink
 281   // We would never want to shrink more than this
 282   assert(capacity_until_GC >= minimum_desired_capacity,
 283          SIZE_FORMAT " >= " SIZE_FORMAT,
 284          capacity_until_GC, minimum_desired_capacity);
 285   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
 286 
 287   // Should shrinking be considered?
 288   if (MaxMetaspaceFreeRatio < 100) {
 289     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
 290     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
 291     const double max_tmp = used_after_gc / minimum_used_percentage;
 292     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
 293     maximum_desired_capacity = MAX2(maximum_desired_capacity,
 294                                     MetaspaceSize);
 295     log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
 296                              maximum_free_percentage, minimum_used_percentage);
 297     log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
 298                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
 299 
 300     assert(minimum_desired_capacity <= maximum_desired_capacity,
 301            "sanity check");
 302 
 303     if (capacity_until_GC > maximum_desired_capacity) {
 304       // Capacity too large, compute shrinking size
 305       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
 306       // We don't want to shrink all the way back to initSize if people call
 307       // System.gc(), because some programs do that between "phases" and then
 308       // we'd just have to grow the heap up again for the next phase.  So we
 309       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
 310       // on the third call, and 100% by the fourth call.  But if we recompute
 311       // size without shrinking, it goes back to 0%.
 312       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
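
In the version above, MIN2 clamps the intermediate double against double(MaxMetaspaceSize) before it is narrowed back to size_t, so the computed desired capacities can never exceed the configured ceiling. A tiny standalone illustration of that clamp-then-narrow step, using assumed values and std::min in place of MIN2:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const double used_after_gc           = 3.0 * 1024 * 1024 * 1024;   // 3 GB, assumed
  const double minimum_used_percentage = 0.01;                        // extreme ratio, assumed
  const double max_metaspace_size      = 4.0 * 1024 * 1024 * 1024;    // assumed MaxMetaspaceSize

  // Dividing by a tiny used-percentage can produce a huge intermediate value;
  // clamping while still in double keeps the result within the configured maximum.
  const size_t maximum_desired_capacity =
      (size_t)std::min(used_after_gc / minimum_used_percentage, max_metaspace_size);

  printf("maximum_desired_capacity: %zu bytes\n", maximum_desired_capacity);
  return 0;
}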


1466     return  class_vsm()->allocate(word_size);
1467   } else {
1468     return  vsm()->allocate(word_size);
1469   }
1470 }
1471 
1472 MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
1473   Metaspace::assert_not_frozen();
1474   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
1475   assert(delta_bytes > 0, "Must be");
1476 
1477   size_t before = 0;
1478   size_t after = 0;
1479   MetaWord* res;
1480   bool incremented;
1481 
1482   // Each thread increments the HWM at most once. Even if the thread fails to increment
1483   // the HWM, an allocation is still attempted. This is because another thread must then
1484   // have incremented the HWM and therefore the allocation might still succeed.
1485   do {
1486     if (MetaspaceGC::capacity_until_GC() + delta_bytes > MaxMetaspaceSize) {
1487       return NULL;
1488     }
1489     incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
1490     res = allocate(word_size, mdtype);
1491   } while (!incremented && res == NULL);
1492 
1493   if (incremented) {
1494     Metaspace::tracer()->report_gc_threshold(before, after,
1495                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
1496     log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
1497   }
1498 
1499   return res;
1500 }
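
The early return inside the loop above keeps a thread from even requesting a high-water-mark raise that would push capacity_until_GC past MaxMetaspaceSize; the caller simply gets NULL back. A compact sketch of that bounded-raise check, with a hypothetical ceiling constant and names chosen for illustration:

#include <atomic>
#include <cstddef>

static const size_t kMaxMetaspace = 256 * 1024 * 1024;   // stand-in for MaxMetaspaceSize
static std::atomic<size_t> g_hwm{128 * 1024 * 1024};      // stand-in for _capacity_until_GC

// Returns false without attempting a raise when the request would exceed the
// ceiling; otherwise tries a single compare-exchange, as in the loop above.
bool bounded_raise(size_t delta_bytes) {
  size_t current = g_hwm.load();
  if (current + delta_bytes > kMaxMetaspace) {
    return false;   // would overshoot the configured maximum: give up
  }
  size_t desired = current + delta_bytes;
  return g_hwm.compare_exchange_strong(current, desired);
}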
1501 
1502 size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
1503   return (vsm()->used_words() +
1504       (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
1505 }
1506 
1507 size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
1508   return (vsm()->capacity_words() +

