/*
 * Copyright (c) 2016, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
        _heap(heap),
        _mutator_free_bitmap(max_regions, /* in_resource_area = */ false),
        _collector_free_bitmap(max_regions, /* in_resource_area = */ false),
        _max(max_regions)
{
  clear_internal();
}

void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  assert_heaplock_owned_by_current_thread();
  _used += num_bytes;

  assert(_used <= _capacity, err_msg("must not use more than we have: used: " SIZE_FORMAT
                                     ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT,
                                     _used, _capacity, num_bytes));
}

bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  assert (idx < _max,
          err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
                  idx, _max, _mutator_leftmost, _mutator_rightmost));
  return _mutator_free_bitmap.at(idx);
}

bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  assert (idx < _max,
          err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
                  idx, _max, _collector_leftmost, _collector_rightmost));
  return _collector_free_bitmap.at(idx);
}

HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahHeap::ShenandoahAllocationRequest& req, bool& in_new_region) {
  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
  // we would find the region to allocate in right away.
  //
  // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because we would clear lots
  // of regions from the beginning most of the time.
  //
  // Free set maintains mutator and collector views, and normally they allocate in their views only,
  // unless we have special cases for stealing and mixed allocations.

  switch (req.type()) {
    case ShenandoahHeap::_alloc_tlab:
    case ShenandoahHeap::_alloc_shared: {

      // Try to allocate in the mutator view
      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // There is no recovery. Mutator does not touch collector view at all.
      break;
    }
    case ShenandoahHeap::_alloc_gclab:
    case ShenandoahHeap::_alloc_shared_gc: {
      // size_t is unsigned, need to dodge underflow when _leftmost = 0

      // Fast-path: try to allocate in the collector view first
      for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
        size_t idx = c - 1;
        if (is_collector_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // No dice. Can we borrow space from mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return NULL;
      }

      // Try to steal an empty region from the mutator view
      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
        size_t idx = c - 1;
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (is_empty_or_trash(r)) {
            flip_to_gc(r);
            HeapWord* result = try_allocate_in(r, req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }

      // Try to mix the allocation into the mutator view:
      if (ShenandoahAllowMixedAllocs) {
        for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
          size_t idx = c - 1;
          if (is_mutator_free(idx)) {
            HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }

  return NULL;
}

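// Try to allocate in the given region, which is known to have some capacity. Trash
// regions are recycled first. Elastic LAB requests may be shrunk to the space the
// region actually has, as long as the minimum requested size is still satisfied.
// Once the region cannot serve this or future allocations, it is retired from both views.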
HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahHeap::ShenandoahAllocationRequest& req, bool& in_new_region) {
  assert (!has_no_alloc_capacity(r), err_msg("Performance: should avoid full regions on this path: " SIZE_FORMAT, r->region_number()));

  try_recycle_trashed(r);

  in_new_region = r->is_empty();

  HeapWord* result = NULL;
  size_t size = req.size();

  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
    size_t free = align_size_down(r->free() >> LogHeapWordSize, MinObjAlignment);
    if (size > free) {
      size = free;
    }
    if (size >= req.min_size()) {
      result = r->allocate(size, req.type());
      assert (result != NULL, err_msg("Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size));
    }
  } else {
    result = r->allocate(size, req.type());
  }

  if (result != NULL) {
    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      increase_used(size * HeapWordSize);
    }

    // Record actual allocation size
    req.set_actual_size(size);
  }

  if (result == NULL || has_no_alloc_capacity(r)) {
    // Region cannot afford this or future allocations. Retire it.
    //
    // While this seems a bit harsh, especially when this large allocation does not
    // fit but the next small one would, we risk inflating scan times when lots of
    // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
    // TODO: Record first fully-empty region, and use that for large allocations

    // Record the remainder as allocation waste
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        increase_used(waste);
        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
      }
    }

    size_t num = r->region_number();
    _collector_free_bitmap.clear_bit(num);
    _mutator_free_bitmap.clear_bit(num);
    // Touched the bounds? Need to update:
    if (touches_bounds(num)) {
      adjust_bounds();
    }
    assert_bounds();
  }
  return result;
}

bool ShenandoahFreeSet::touches_bounds(size_t num) const {
  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
}

void ShenandoahFreeSet::recompute_bounds() {
  // Reset to the most pessimistic case:
  _mutator_rightmost = _max - 1;
  _mutator_leftmost = 0;
  _collector_rightmost = _max - 1;
  _collector_leftmost = 0;

  // ...and adjust from there
  adjust_bounds();
}

void ShenandoahFreeSet::adjust_bounds() {
  // Rewind both mutator bounds until the next bit.
  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
    _mutator_leftmost++;
  }
  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
    _mutator_rightmost--;
  }
  // Rewind both collector bounds until the next bit.
  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
    _collector_leftmost++;
  }
  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
    _collector_rightmost--;
  }
}

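// Allocate a humongous object: find a run of 'num' contiguous, completely free regions
// in the mutator view, mark them as a humongous start/continuation run, and account
// the whole run as used in the free set.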
HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahHeap::ShenandoahAllocationRequest& req) {
  assert_heaplock_owned_by_current_thread();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

  // No regions left to satisfy allocation, bye.
  if (num > mutator_count()) {
    return NULL;
  }

  // Find the continuous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning.

  size_t beg = _mutator_leftmost;
  size_t end = beg;

  while (true) {
    if (end >= _max) {
      // Hit the end, goodbye
      return NULL;
    }

    // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
    // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.
    if (!is_mutator_free(end) || !is_empty_or_trash(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  }

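  // Arithmetic example (hypothetical numbers, assuming 256K-word regions): a request
  // of 600K words needs num = 3 regions, and the trailing region carries
  // remainder = 600K & (256K - 1) = 88K words; the rest of it is waste.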
  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i-1)->region_number() + 1 == r->region_number(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_top(r->bottom() + used_words);
    r->reset_alloc_metadata_to_shared();

    _mutator_free_bitmap.clear_bit(r->region_number());
  }

  // While individual regions report their true use, all humongous regions are
  // marked used in the free set.
  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);

  if (remainder != 0) {
    // Record this remainder as allocation waste
    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
  }

  // Allocated at left/rightmost? Move the bounds appropriately.
  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
    adjust_bounds();
  }
  assert_bounds();

  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}

bool ShenandoahFreeSet::is_empty_or_trash(ShenandoahHeapRegion *r) {
  return r->is_empty() || r->is_trash();
}

size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    // This would be recycled on allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
  return alloc_capacity(r) == 0;
}

void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

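// Recycle all trash regions in the heap. The heap lock is taken and released per
// region, with a SpinPause in between, so that allocating threads are not starved
// while the whole heap is swept.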
void ShenandoahFreeSet::recycle_trash() {
  // lock is not reentrant, check we don't have it
  assert_heaplock_not_owned_by_current_thread();

  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

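// Move an empty or trash region from the mutator view to the collector view, letting
// the collector "steal" allocation capacity when its own reserve has run dry.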
void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->region_number();

  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
  assert(is_empty_or_trash(r), "Should not be allocated");

  _mutator_free_bitmap.clear_bit(idx);
  _collector_free_bitmap.set_bit(idx);
  _collector_leftmost = MIN2(idx, _collector_leftmost);
  _collector_rightmost = MAX2(idx, _collector_rightmost);

  _capacity -= alloc_capacity(r);

  if (touches_bounds(idx)) {
    adjust_bounds();
  }
  assert_bounds();
}

void ShenandoahFreeSet::clear() {
  assert_heaplock_owned_by_current_thread();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _mutator_free_bitmap.clear();
  _collector_free_bitmap.clear();
  _mutator_leftmost = _max;
  _mutator_rightmost = 0;
  _collector_leftmost = _max;
  _collector_rightmost = 0;
  _capacity = 0;
  _used = 0;
}

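// Rebuild the free set from scratch: re-scan all regions into the mutator view, then
// carve out the trailing ShenandoahEvacReserve percent of heap capacity as the
// collector view (the evacuation reserve).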
void ShenandoahFreeSet::rebuild() {
  assert_heaplock_owned_by_current_thread();
  clear();

  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->in_collection_set(), "Shouldn't be adding those to the free set");

      // Do not add regions that would surely fail allocation
      if (has_no_alloc_capacity(region)) continue;

      _capacity += alloc_capacity(region);
      assert(_used <= _capacity, "must not use more than we have");

      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
      _mutator_free_bitmap.set_bit(idx);
    }
  }

  // Evac reserve: reserve trailing space for evacuations
  size_t to_reserve = ShenandoahEvacReserve * _heap->capacity() / 100;
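  // For example (hypothetical numbers): a 1000M heap with ShenandoahEvacReserve = 5
  // yields to_reserve = 5 * 1000M / 100 = 50M.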
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
    if (reserved >= to_reserve) break;

    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (_mutator_free_bitmap.at(idx) && is_empty_or_trash(region)) {
      _mutator_free_bitmap.clear_bit(idx);
      _collector_free_bitmap.set_bit(idx);
      size_t ac = alloc_capacity(region);
      _capacity -= ac;
      reserved += ac;
    }
  }

  recompute_bounds();
  assert_bounds();
}

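// Log a one-line summary of both views: total free space, the largest single-region
// allocation, the largest possible humongous allocation, and estimates of external
// and internal fragmentation.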
void ShenandoahFreeSet::log_status() {
  assert_heaplock_owned_by_current_thread();

  if (ShenandoahLogInfo || PrintGCDetails) {
    ResourceMark rm;
    outputStream* ls = gclog_or_tty;

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;

      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);

          max = MAX2(max, free);

          if (r->is_empty()) {
            if (last_idx + 1 == idx) {
              empty_contig++;
            } else {
              // Restart the run: a lone empty region still counts as length 1
              empty_contig = 1;
            }
          } else {
            empty_contig = 0;
          }

          total_used += r->used();
          total_free += free;

          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
      size_t free = capacity() - used();

      ls->print("Free: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K, Max humongous: " SIZE_FORMAT "K, ",
                total_free / M, mutator_count(), max / K, max_humongous / K);

      size_t frag_ext;
      if (free > 0) {
        frag_ext = 100 - (100 * max_humongous / free);
      } else {
        frag_ext = 0;
      }
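      // Example with made-up numbers: free = 200M and max_humongous = 50M gives
      // frag_ext = 100 - (100 * 50 / 200) = 75%: three quarters of the free space
      // cannot be used for a single contiguous allocation.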
      ls->print("External frag: " SIZE_FORMAT "%%, ", frag_ext);

      size_t frag_int;
      if (mutator_count() > 0) {
        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
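      // Example with made-up numbers: 10 free regions of 4M each, carrying 1M of used
      // space in total, average ~100K used per region, so
      // frag_int = 100 * (1M / 10) / 4M = 2% (integer division).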
      ls->print("Internal frag: " SIZE_FORMAT "%%", frag_int);
      ls->cr();
    }

    {
      size_t max = 0;
      size_t total_free = 0;

      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
        if (is_collector_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
        }
      }

      ls->print_cr("Evacuation Reserve: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K",
                   total_free / M, collector_count(), max / K);
    }
  }
}

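// Top-level allocation entry point: route humongous-sized requests to the contiguous
// allocator, and everything else to the single-region path. LAB requests above the
// humongous threshold are a caller bug and fail outright.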
HeapWord* ShenandoahFreeSet::allocate(ShenandoahHeap::ShenandoahAllocationRequest& req, bool& in_new_region) {
  assert_heaplock_owned_by_current_thread();
  assert_bounds();

  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahHeap::_alloc_shared:
      case ShenandoahHeap::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahHeap::_alloc_gclab:
      case ShenandoahHeap::_alloc_tlab:
        in_new_region = false;
        assert(false, err_msg("Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
                              req.size(), ShenandoahHeapRegion::humongous_threshold_words()));
        return NULL;
      default:
        ShouldNotReachHere();
        return NULL;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

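// Racy estimate of a TLAB-sized free chunk in the mutator view. The free set may be
// modified concurrently, hence "unsafe": callers should treat the result as a hint only.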
size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked, this method is unsafe when free set is modified.

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (index < _max && is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // It appears that no regions are left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
    if (is_collector_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
}

#ifdef ASSERT
void ShenandoahFreeSet::assert_heaplock_owned_by_current_thread() const {
  _heap->assert_heaplock_owned_by_current_thread();
}

void ShenandoahFreeSet::assert_heaplock_not_owned_by_current_thread() const {
  _heap->assert_heaplock_not_owned_by_current_thread();
}

void ShenandoahFreeSet::assert_bounds() const {
  // Performance invariants. Failing these would not break the free set, but performance
  // would suffer.
  assert (_mutator_leftmost <= _max, err_msg("leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, _mutator_leftmost,  _max));
  assert (_mutator_rightmost < _max, err_msg("rightmost in bounds: " SIZE_FORMAT " < "  SIZE_FORMAT, _mutator_rightmost, _max));

  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  err_msg("leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost));
  assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), err_msg("rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost));

  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
  assert (beg_off >= _mutator_leftmost, err_msg("free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost));
  assert (end_off == _max,              err_msg("free regions past the rightmost: "  SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _mutator_rightmost));

  assert (_collector_leftmost <= _max, err_msg("leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, _collector_leftmost,  _max));
  assert (_collector_rightmost < _max, err_msg("rightmost in bounds: " SIZE_FORMAT " < "  SIZE_FORMAT, _collector_rightmost, _max));

  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  err_msg("leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost));
  assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), err_msg("rightmost region should be free: " SIZE_FORMAT, _collector_rightmost));

  beg_off = _collector_free_bitmap.get_next_one_offset(0);
  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
  assert (beg_off >= _collector_leftmost, err_msg("free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost));
  assert (end_off == _max,                err_msg("free regions past the rightmost: "  SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _collector_rightmost));
}
#endif