/*
 * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
        _heap(heap),
        _mutator_free_bitmap(max_regions, /* in_resource_area = */ false),
        _collector_free_bitmap(max_regions, /* in_resource_area = */ false),
        _max(max_regions)
{
  clear_internal();
}

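// Bumps the used-bytes counter for the mutator view. Caller must hold the
// heap lock; the counter must never exceed the tracked capacity.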
void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  assert_heaplock_owned_by_current_thread();
  _used += num_bytes;

  assert(_used <= _capacity, err_msg("must not use more than we have: used: " SIZE_FORMAT
                                     ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT,
                                     _used, _capacity, num_bytes));
}

bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  assert (idx < _max,
          err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
                  idx, _max, _mutator_leftmost, _mutator_rightmost));
  return _mutator_free_bitmap.at(idx);
}

bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  assert (idx < _max,
          err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
                  idx, _max, _collector_leftmost, _collector_rightmost));
  return _collector_free_bitmap.at(idx);
}

HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
  // we would find the region to allocate in right away.
  //
  // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because we would clear lots
  // of regions from the beginning most of the time.
  //
  // The free set maintains mutator and collector views, and normally they allocate in their views
  // only, unless we special-case stealing and mixed allocations.

  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {

      // Try to allocate in the mutator view
      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // There is no recovery. The mutator does not touch the collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
    case ShenandoahAllocRequest::_alloc_shared_gc: {
      // size_t is unsigned, need to dodge underflow when _leftmost = 0

      // Fast-path: try to allocate in the collector view first
      for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
        size_t idx = c - 1;
        if (is_collector_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // No dice. Can we borrow space from the mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return NULL;
      }

      // Try to steal an empty region from the mutator view
      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
        size_t idx = c - 1;
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (is_empty_or_trash(r)) {
            flip_to_gc(r);
            HeapWord *result = try_allocate_in(r, req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }

      // Try to mix the allocation into the mutator view:
      if (ShenandoahAllowMixedAllocs) {
        for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
          size_t idx = c - 1;
          if (is_mutator_free(idx)) {
            HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }

  return NULL;
}

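// Attempts the allocation within a single region: recycles the region first if
// it is trash, carves out the requested (or elastically shrunken LAB) size,
// and retires the region from both views once it has no capacity left.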
HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (!has_no_alloc_capacity(r), err_msg("Performance: should avoid full regions on this path: " SIZE_FORMAT, r->region_number()));

  try_recycle_trashed(r);

  in_new_region = r->is_empty();

  HeapWord* result = NULL;
  size_t size = req.size();

  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
    size_t free = align_size_down(r->free() >> LogHeapWordSize, MinObjAlignment);
    if (size > free) {
      size = free;
    }
    if (size >= req.min_size()) {
      result = r->allocate(size, req.type());
      assert (result != NULL, err_msg("Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size));
    }
  } else {
    result = r->allocate(size, req.type());
  }

  if (result != NULL) {
    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      increase_used(size * HeapWordSize);
    }

    // Record actual allocation size
    req.set_actual_size(size);
  }

  if (result == NULL || has_no_alloc_capacity(r)) {
    // Region cannot afford this or future allocations. Retire it.
    //
    // While this seems a bit harsh, especially when this large allocation does not fit
    // but the next small one would, we risk inflating scan times when lots of
    // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
    // TODO: Record first fully-empty region, and use that for large allocations

    // Record the remainder as allocation waste
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        increase_used(waste);
        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
      }
    }

    size_t num = r->region_number();
    _collector_free_bitmap.clear_bit(num);
    _mutator_free_bitmap.clear_bit(num);
    // Touched the bounds? Need to update:
    if (touches_bounds(num)) {
      adjust_bounds();
    }
    assert_bounds();
  }
  return result;
}

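// Returns true if retiring this region may invalidate one of the cached
// leftmost/rightmost search bounds.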
bool ShenandoahFreeSet::touches_bounds(size_t num) const {
  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
}

void ShenandoahFreeSet::recompute_bounds() {
  // Reset to the most pessimistic case:
  _mutator_rightmost = _max - 1;
  _mutator_leftmost = 0;
  _collector_rightmost = _max - 1;
  _collector_leftmost = 0;

  // ...and adjust from there
  adjust_bounds();
}

void ShenandoahFreeSet::adjust_bounds() {
  // Rewind both mutator bounds until the next set bit.
  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
    _mutator_leftmost++;
  }
  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
    _mutator_rightmost--;
  }
  // Rewind both collector bounds until the next set bit.
  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
    _collector_leftmost++;
  }
  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
    _collector_rightmost--;
  }
}

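// Allocates a humongous object: finds a contiguous run of completely free
// regions in the mutator view, marks them as humongous start/continuation,
// and returns the bottom of the first region.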
HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  assert_heaplock_owned_by_current_thread();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

  // No regions left to satisfy allocation, bye.
  if (num > mutator_count()) {
    return NULL;
  }

  // Find the continuous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning.

  size_t beg = _mutator_leftmost;
  size_t end = beg;

  while (true) {
    if (end >= _max) {
      // Hit the end, goodbye
      return NULL;
    }

    // If the current region is not free, or not completely empty, then the current
    // [beg; end] interval is useless, and we may fast-forward past it.
    if (!is_mutator_free(end) || !is_empty_or_trash(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  }

  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i-1)->region_number() + 1 == r->region_number(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_top(r->bottom() + used_words);
    r->reset_alloc_metadata_to_shared();

    _mutator_free_bitmap.clear_bit(r->region_number());
  }

  // While individual regions report their true use, all humongous regions are
  // marked used in the free set.
  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);

  if (remainder != 0) {
    // Record this remainder as allocation waste
    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
  }

  // Allocated at left/rightmost? Move the bounds appropriately.
  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
    adjust_bounds();
  }
  assert_bounds();

  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}

bool ShenandoahFreeSet::is_empty_or_trash(ShenandoahHeapRegion *r) {
  return r->is_empty() || r->is_trash();
}

size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    // This would be recycled on the allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
  return alloc_capacity(r) == 0;
}

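// Recycles a trash region in place, returning its bytes to the heap-wide
// used accounting.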
void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

void ShenandoahFreeSet::recycle_trash() {
  // The heap lock is not reentrant; check that we do not already hold it.
  assert_heaplock_not_owned_by_current_thread();

  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

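// Moves an empty (or trash) region from the mutator view into the collector
// view, so that GC evacuation can allocate into it.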
void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->region_number();

  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
  assert(is_empty_or_trash(r), "Should not be allocated");

  _mutator_free_bitmap.clear_bit(idx);
  _collector_free_bitmap.set_bit(idx);
  _collector_leftmost = MIN2(idx, _collector_leftmost);
  _collector_rightmost = MAX2(idx, _collector_rightmost);

  _capacity -= alloc_capacity(r);

  if (touches_bounds(idx)) {
    adjust_bounds();
  }
  assert_bounds();
}

void ShenandoahFreeSet::clear() {
  assert_heaplock_owned_by_current_thread();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _mutator_free_bitmap.clear();
  _collector_free_bitmap.clear();
  _mutator_leftmost = _max;
  _mutator_rightmost = 0;
  _collector_leftmost = _max;
  _collector_rightmost = 0;
  _capacity = 0;
  _used = 0;
}

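// Rebuilds both views from scratch: every region with allocation capacity
// goes into the mutator view first, then trailing empty regions are flipped
// to the collector view until the evacuation reserve is covered.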
void ShenandoahFreeSet::rebuild() {
  assert_heaplock_owned_by_current_thread();
  clear();

  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->is_cset(), "Shouldn't be adding those to the free set");

      // Do not add regions that would surely fail allocation
      if (has_no_alloc_capacity(region)) continue;

      _capacity += alloc_capacity(region);
      assert(_used <= _capacity, "must not use more than we have");

      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
      _mutator_free_bitmap.set_bit(idx);
    }
  }

  // Evac reserve: reserve trailing space for evacuations
  size_t to_reserve = ShenandoahEvacReserve * _heap->capacity() / 100;
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
    if (reserved >= to_reserve) break;

    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (_mutator_free_bitmap.at(idx) && is_empty_or_trash(region)) {
      _mutator_free_bitmap.clear_bit(idx);
      _collector_free_bitmap.set_bit(idx);
      size_t ac = alloc_capacity(region);
      _capacity -= ac;
      reserved += ac;
    }
  }

  recompute_bounds();
  assert_bounds();
}

void ShenandoahFreeSet::log_status() {
  assert_heaplock_owned_by_current_thread();

  if (ShenandoahLogInfo || PrintGCDetails) {
    ResourceMark rm;
    outputStream* ls = gclog_or_tty;

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;

      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);

          max = MAX2(max, free);

          if (r->is_empty() && (last_idx + 1 == idx)) {
            empty_contig++;
          } else {
            empty_contig = 0;
          }

          total_used += r->used();
          total_free += free;

          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
      size_t free = capacity() - used();

      ls->print("Free: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K, Max humongous: " SIZE_FORMAT "K, ",
                total_free / M, mutator_count(), max / K, max_humongous / K);

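      // External fragmentation: the share of free space that cannot serve a
      // single humongous allocation; 0% means all free space is contiguous.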
      size_t frag_ext;
      if (free > 0) {
        frag_ext = 100 - (100 * max_humongous / free);
      } else {
        frag_ext = 0;
      }
      ls->print("External frag: " SIZE_FORMAT "%%, ", frag_ext);

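      // Internal fragmentation: how full the regions in the mutator view
      // already are, on average; 0% means the free regions are untouched.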
      size_t frag_int;
      if (mutator_count() > 0) {
        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls->print("Internal frag: " SIZE_FORMAT "%%", frag_int);
      ls->cr();
    }

    {
      size_t max = 0;
      size_t total_free = 0;

      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
        if (is_collector_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
        }
      }

      ls->print_cr("Evacuation Reserve: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K",
                   total_free / M, collector_count(), max / K);
    }
  }
}

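// Entry point for all free-set allocations, called under the heap lock:
// humongous requests take the contiguous path, everything else is served
// from a single region. A minimal usage sketch (hypothetical caller; actual
// call sites live in the heap itself, and the request factory shown here is
// an assumption, not a confirmed signature):
//
//   ShenandoahHeapLocker locker(heap->lock());
//   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, size);
//   bool in_new_region = false;
//   HeapWord* mem = heap->free_set()->allocate(req, in_new_region);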
HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  assert_heaplock_owned_by_current_thread();
  assert_bounds();

  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, err_msg("Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
                              req.size(), ShenandoahHeapRegion::humongous_threshold_words()));
        return NULL;
      default:
        ShouldNotReachHere();
        return NULL;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked, this method is unsafe when the free set is modified.

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (index < _max && is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // It appears that no regions are left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
    if (is_collector_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
}

#ifdef ASSERT
void ShenandoahFreeSet::assert_heaplock_owned_by_current_thread() const {
  _heap->assert_heaplock_owned_by_current_thread();
}

void ShenandoahFreeSet::assert_heaplock_not_owned_by_current_thread() const {
  _heap->assert_heaplock_not_owned_by_current_thread();
}

void ShenandoahFreeSet::assert_bounds() const {
  // Performance invariants. Failing these would not break the free set, but performance
  // would suffer.
  assert (_mutator_leftmost <= _max, err_msg("leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, _mutator_leftmost,  _max));
  assert (_mutator_rightmost < _max, err_msg("rightmost in bounds: " SIZE_FORMAT " < "  SIZE_FORMAT, _mutator_rightmost, _max));

  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  err_msg("leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost));
  assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), err_msg("rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost));

  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
  assert (beg_off >= _mutator_leftmost, err_msg("free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost));
  assert (end_off == _max,              err_msg("free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _mutator_rightmost));

  assert (_collector_leftmost <= _max, err_msg("leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, _collector_leftmost,  _max));
  assert (_collector_rightmost < _max, err_msg("rightmost in bounds: " SIZE_FORMAT " < "  SIZE_FORMAT, _collector_rightmost, _max));

  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  err_msg("leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost));
  assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), err_msg("rightmost region should be free: " SIZE_FORMAT, _collector_rightmost));

  beg_off = _collector_free_bitmap.get_next_one_offset(0);
  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
  assert (beg_off >= _collector_leftmost, err_msg("free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost));
  assert (end_off == _max,                err_msg("free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _collector_rightmost));
}
#endif