/*
 * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "logging/logStream.hpp"
#include "runtime/orderAccess.hpp"

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  _heap(heap),
  _mutator_free_bitmap(max_regions, mtGC),
  _collector_free_bitmap(max_regions, mtGC),
  _max(max_regions)
{
  clear_internal();
}

void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  shenandoah_assert_heaplocked();
  _used += num_bytes;

  assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
         ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
}

bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _mutator_leftmost, _mutator_rightmost);
  return _mutator_free_bitmap.at(idx);
}

bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _collector_leftmost, _collector_rightmost);
  return _collector_free_bitmap.at(idx);
}

HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  // Scan the bitmap looking for a first fit.
  //
  // The leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently.
  // Normally, we find the region to allocate in right away.
  //
  // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
  // go to the end. This keeps application allocation fast, because most of the time the regions
  // at the beginning of the heap are the ones that get cleared and reused.
  //
  // The free set maintains mutator and collector views, and normally allocations are served from
  // their own view only, except for the special cases of stealing and mixed allocations.

  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {

      // Try to allocate in the mutator view
      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // There is no recovery. The mutator does not touch the collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
    case ShenandoahAllocRequest::_alloc_shared_gc: {
      // size_t is unsigned, so we need to dodge underflow when _leftmost == 0

      // Fast-path: try to allocate in the collector view first
      for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
        size_t idx = c - 1;
        if (is_collector_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // No dice. Can we borrow space from the mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return NULL;
      }

      // Try to steal an empty region from the mutator view
      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
        size_t idx = c - 1;
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (can_allocate_from(r)) {
            flip_to_gc(r);
            HeapWord *result = try_allocate_in(r, req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }

      // No dice. Do not try to mix mutator and GC allocations, because
      // update-references watermark (URWM) moves due to GC allocations
      // would expose unparsable mutator allocations.

      break;
    }
    default:
      ShouldNotReachHere();
  }

  return NULL;
}

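// Attempt the allocation within a single region. On success, return the allocated address
// and bump the free set statistics. A region that cannot serve this or any future
// allocation is retired from both views.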
HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());

  if (_heap->is_concurrent_weak_root_in_progress() &&
      r->is_trash()) {
    return NULL;
  }

  try_recycle_trashed(r);

  in_new_region = r->is_empty();

  HeapWord* result = NULL;
  size_t size = req.size();

  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
    size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
    if (size > free) {
      size = free;
    }
    if (size >= req.min_size()) {
      result = r->allocate(size, req.type());
      assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
    }
  } else {
    result = r->allocate(size, req.type());
  }

  if (result != NULL) {
    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      increase_used(size * HeapWordSize);
    }

    // Record actual allocation size
    req.set_actual_size(size);

    // FIXME: No need, recursive evacuation handles this for normal?
//    if (req.is_gc_alloc()) {
//      r->set_update_watermark(r->top());
//    }
  }

  if (result == NULL || has_no_alloc_capacity(r)) {
    // Region cannot afford this or future allocations. Retire it.
    //
    // While this seems a bit harsh, especially when this large allocation does not fit
    // but the next small one would, we risk inflating scan times when lots of
    // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
    // TODO: Record the first fully-empty region, and use that for large allocations

    // Record the remainder as allocation waste
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        increase_used(waste);
        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
      }
    }

    size_t num = r->index();
    _collector_free_bitmap.clear_bit(num);
    _mutator_free_bitmap.clear_bit(num);
    // Touched the bounds? Need to update:
    if (touches_bounds(num)) {
      adjust_bounds();
    }
    assert_bounds();
  }
  return result;
}

bool ShenandoahFreeSet::touches_bounds(size_t num) const {
  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
}

void ShenandoahFreeSet::recompute_bounds() {
  // Reset to the most pessimistic case:
  _mutator_rightmost = _max - 1;
  _mutator_leftmost = 0;
  _collector_rightmost = _max - 1;
  _collector_leftmost = 0;

  // ...and adjust from there
  adjust_bounds();
}

void ShenandoahFreeSet::adjust_bounds() {
  // Move both mutator bounds inward, until they land on set bits.
  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
    _mutator_leftmost++;
  }
  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
    _mutator_rightmost--;
  }
  // Move both collector bounds inward, until they land on set bits.
  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
    _collector_leftmost++;
  }
  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
    _collector_rightmost--;
  }
}

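// Allocate a humongous object: find a run of contiguous, completely free regions in the
// mutator view, mark them as humongous start/continuation, and return the start address.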
HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  shenandoah_assert_heaplocked();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

  // No regions left to satisfy allocation, bye.
  if (num > mutator_count()) {
    return NULL;
  }

  // Find the contiguous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning.

  size_t beg = _mutator_leftmost;
  size_t end = beg;

  while (true) {
    if (end >= _max) {
      // Hit the end, goodbye
      return NULL;
    }

    // If the region at $end is not free, or is free but cannot be allocated from,
    // the current [beg; end] interval is useless, and we may fast-forward past it.
    if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // Found the match
      break;
    }

    end++;
  }

  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_top(r->bottom() + used_words);

    _mutator_free_bitmap.clear_bit(r->index());
  }

  // While individual regions report their true use, all humongous regions are
  // marked used in the free set.
  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);

  if (remainder != 0) {
    // Record this remainder as allocation waste
    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
  }

  // Allocated at the leftmost/rightmost? Move the bounds appropriately.
  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
    adjust_bounds();
  }
  assert_bounds();

  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}

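// A region can serve fresh allocations if it is completely empty, or if it is trash that
// is safe to recycle, i.e. when concurrent weak root processing is not in progress.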
bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
  return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
}

size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    // This region would be recycled on the allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
  return alloc_capacity(r) == 0;
}

void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

void ShenandoahFreeSet::recycle_trash() {
  // The lock is not reentrant; check that we do not already hold it
  shenandoah_assert_not_heaplocked();

  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

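// Move a region from the mutator view to the collector view, so that the collector can
// allocate in it, e.g. when stealing empty regions under ShenandoahEvacReserveOverflow.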
void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->index();

  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
  assert(can_allocate_from(r), "Should not be allocated");

  _mutator_free_bitmap.clear_bit(idx);
  _collector_free_bitmap.set_bit(idx);
  _collector_leftmost = MIN2(idx, _collector_leftmost);
  _collector_rightmost = MAX2(idx, _collector_rightmost);

  _capacity -= alloc_capacity(r);

  if (touches_bounds(idx)) {
    adjust_bounds();
  }
  assert_bounds();
}

void ShenandoahFreeSet::clear() {
  shenandoah_assert_heaplocked();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _mutator_free_bitmap.clear();
  _collector_free_bitmap.clear();
  _mutator_leftmost = _max;
  _mutator_rightmost = 0;
  _collector_leftmost = _max;
  _collector_rightmost = 0;
  _capacity = 0;
  _used = 0;
}

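// Rebuild the free set from the current region states: all regions with allocation capacity
// go to the mutator view first, then a trailing portion of the heap (ShenandoahEvacReserve
// percent of max capacity) is carved out into the collector view as the evacuation reserve.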
void ShenandoahFreeSet::rebuild() {
  shenandoah_assert_heaplocked();
  clear();

  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->is_cset(), "Shouldn't be adding those to the free set");

      // Do not add regions that would surely fail allocation
      if (has_no_alloc_capacity(region)) continue;

      _capacity += alloc_capacity(region);
      assert(_used <= _capacity, "must not use more than we have");

      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
      _mutator_free_bitmap.set_bit(idx);
    }
  }

  // Evac reserve: reserve trailing space for evacuations
  size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
    if (reserved >= to_reserve) break;

    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
      _mutator_free_bitmap.clear_bit(idx);
      _collector_free_bitmap.set_bit(idx);
      size_t ac = alloc_capacity(region);
      _capacity -= ac;
      reserved += ac;
    }
  }

  recompute_bounds();
  assert_bounds();
}

void ShenandoahFreeSet::log_status() {
  shenandoah_assert_heaplocked();

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;
      size_t total_free_ext = 0;

      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);

          max = MAX2(max, free);

          if (r->is_empty()) {
            total_free_ext += free;
            if (last_idx + 1 == idx) {
              empty_contig++;
            } else {
              empty_contig = 1;
            }
          } else {
            empty_contig = 0;
          }

          total_used += r->used();
          total_free += free;

          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();

      ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
               byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
               byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
               byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
      );

      ls.print("Frag: ");
      size_t frag_ext;
      if (total_free_ext > 0) {
        frag_ext = 100 - (100 * max_humongous / total_free_ext);
      } else {
        frag_ext = 0;
      }
      ls.print(SIZE_FORMAT "%% external, ", frag_ext);

      size_t frag_int;
      if (mutator_count() > 0) {
        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls.print(SIZE_FORMAT "%% internal; ", frag_int);
    }

    {
      size_t max = 0;
      size_t total_free = 0;

      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
        if (is_collector_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
        }
      }

      ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
                  byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
                  byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
    }
  }
}

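// Entry point for all free set allocations: humongous-sized requests are routed to
// contiguous multi-region allocation, everything else to single-region allocation.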
HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();
  assert_bounds();

  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
               req.size(), ShenandoahHeapRegion::humongous_threshold_words());
        return NULL;
      default:
        ShouldNotReachHere();
        return NULL;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked; this method is unsafe when the free set is modified concurrently.

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (index < _max && is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // It appears that no regions are left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
    if (is_collector_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
}

/*
 * Internal fragmentation metric: describes how fragmented the heap regions are.
 *
 * It is derived as:
 *
 *               sum(used[i]^2, i=0..k)
 *   IF = 1 - ------------------------------
 *              C * sum(used[i], i=0..k)
 *
 * ...where k is the number of regions in the computation, C is the region capacity, and
 * used[i] is the used space in the region.
 *
 * The non-linearity causes IF to be lower for the cases where the same total heap
 * used is densely packed. For example:
 *   a) Heap is completely full  => IF = 0
 *   b) Heap is half full, first 50% regions are completely full => IF = 0
 *   c) Heap is half full, each region is 50% full => IF = 1/2 (see the worked check below)
 *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
 *   e) Heap is quarter full, each region is 25% full => IF = 3/4
 *   f) Heap has one small object per each region => IF =~ 1
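 *
 * Worked check for case (c): with k regions at used[i] = C/2, the numerator is
 * sum(used[i]^2, i=0..k) = k * C^2/4 and the denominator is C * sum(used[i], i=0..k) = k * C^2/2,
 * so IF = 1 - (k * C^2/4) / (k * C^2/2) = 1 - 1/2 = 1/2.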
 */
double ShenandoahFreeSet::internal_fragmentation() {
  double squared = 0;
  double linear = 0;
  int count = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      size_t used = r->used();
      squared += used * used;
      linear += used;
      count++;
    }
  }

  if (count > 0) {
    double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
    return 1 - s;
  } else {
    return 0;
  }
}

/*
 * External fragmentation metric: describes how fragmented the heap is.
 *
 * It is derived as:
 *
 *   EF = 1 - largest_contiguous_free / total_free
 *
 * For example:
 *   a) Heap is completely empty => EF = 0
 *   b) Heap is completely full => EF = 0
 *   c) Heap is first-half full => EF = 1/2
 *   d) Heap is half full, full and empty regions interleave => EF =~ 1 (see the worked check below)
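 *
 * Worked check for case (d): with N regions of size S, of which half are empty but no two
 * empty regions are adjacent, total_free = (N/2) * S while largest_contiguous_free = S,
 * so EF = 1 - S / ((N/2) * S) = 1 - 2/N, which approaches 1 as N grows.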
 */
double ShenandoahFreeSet::external_fragmentation() {
  size_t last_idx = 0;
  size_t max_contig = 0;
  size_t empty_contig = 0;

  size_t free = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->is_empty()) {
        free += ShenandoahHeapRegion::region_size_bytes();
        if (last_idx + 1 == index) {
          empty_contig++;
        } else {
          empty_contig = 1;
        }
      } else {
        empty_contig = 0;
      }

      max_contig = MAX2(max_contig, empty_contig);
      last_idx = index;
    }
  }

  if (free > 0) {
    return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
  } else {
    return 0;
  }
}

#ifdef ASSERT
void ShenandoahFreeSet::assert_bounds() const {
  // Performance invariants. Failing these would not break the free set, but performance
  // would suffer.
  assert (_mutator_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost,  _max);
  assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);

  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost);
  assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);

  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
  assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _mutator_rightmost);

  assert (_collector_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost,  _max);
  assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);

  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost);
  assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);

  beg_off = _collector_free_bitmap.get_next_one_offset(0);
  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
  assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _collector_rightmost);
}
#endif