/*
 * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "logging/logStream.hpp"

// The free set tracks which heap regions are available for allocation,
// split into two disjoint views: the "mutator" view (application allocations)
// and the "collector" view (GC evacuation allocations). Each view is a bitmap
// over region indices plus cached leftmost/rightmost bounds for fast scans.
ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  _heap(heap),
  _mutator_free_bitmap(max_regions, mtGC),
  _collector_free_bitmap(max_regions, mtGC),
  _max(max_regions)
{
  clear_internal();
}

// Bump the free-set-level used accounting. Caller must hold the heap lock.
void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  shenandoah_assert_heaplocked();
  _used += num_bytes;

  assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
         ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
}

// Is region idx available in the mutator view?
bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _mutator_leftmost, _mutator_rightmost);
  return _mutator_free_bitmap.at(idx);
}

// Is region idx available in the collector view?
bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _collector_leftmost, _collector_rightmost);
  return _collector_free_bitmap.at(idx);
}

// Satisfy a non-humongous allocation request from a single region.
// Returns NULL when no region in the appropriate view can fit it.
HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
  // we would find the region to allocate at right away.
  //
  // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because we would clear lots
  // of regions from the beginning most of the time.
  //
  // Free set maintains mutator and collector views, and normally they allocate in their views only,
  // unless we special cases for stealing and mixed allocations.

  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {

      // Try to allocate in the mutator view
      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // There is no recovery. Mutator does not touch collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
    case ShenandoahAllocRequest::_alloc_shared_gc: {
      // size_t is unsigned, need to dodge underflow when _leftmost = 0

      // Fast-path: try to allocate in the collector view first
      for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
        size_t idx = c - 1;
        if (is_collector_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // No dice. Can we borrow space from mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return NULL;
      }

      // Try to steal the empty region from the mutator view
      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
        size_t idx = c - 1;
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (is_empty_or_trash(r)) {
            flip_to_gc(r);
            HeapWord *result = try_allocate_in(r, req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }

      // No dice. Do not try to mix mutator and GC allocations, because
      // URWM moves due to GC allocations would expose unparsable mutator
      // allocations.

      break;
    }
    default:
      ShouldNotReachHere();
  }

  return NULL;
}

// Attempt the allocation inside the given region, recycling it first if it
// is trash. Retires the region from both views when it cannot serve further
// allocations. Returns NULL when the request does not fit.
HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());

  try_recycle_trashed(r);

  in_new_region = r->is_empty();

  HeapWord* result = NULL;
  size_t size = req.size();

  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
    // With elastic LABs, the request may be satisfied with less than the
    // desired size, as long as it is at least the requested minimum.
    size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
    if (size > free) {
      size = free;
    }
    if (size >= req.min_size()) {
      result = r->allocate(size, req.type());
      assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
    }
  } else {
    result = r->allocate(size, req.type());
  }

  if (result != NULL) {
    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      increase_used(size * HeapWordSize);
    }

    // Record actual allocation size
    req.set_actual_size(size);

    if (req.is_gc_alloc()) {
      r->set_update_watermark(r->top());
    }
  }

  if (result == NULL || has_no_alloc_capacity(r)) {
    // Region cannot afford this or future allocations. Retire it.
    //
    // While this seems a bit harsh, especially in the case when this large allocation does not
    // fit, but the next small one would, we are risking to inflate scan times when lots of
    // almost-full regions precede the fully-empty region where we want allocate the entire TLAB.
    // TODO: Record first fully-empty region, and use that for large allocations

    // Record the remainder as allocation waste
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        increase_used(waste);
        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
      }
    }

    size_t num = r->index();
    _collector_free_bitmap.clear_bit(num);
    _mutator_free_bitmap.clear_bit(num);
    // Touched the bounds? Need to update:
    if (touches_bounds(num)) {
      adjust_bounds();
    }
    assert_bounds();
  }
  return result;
}

// Does region num coincide with any of the cached view bounds?
bool ShenandoahFreeSet::touches_bounds(size_t num) const {
  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
}

// Recompute the cached bounds for both views from scratch.
void ShenandoahFreeSet::recompute_bounds() {
  // Reset to the most pessimistic case:
  _mutator_rightmost = _max - 1;
  _mutator_leftmost = 0;
  _collector_rightmost = _max - 1;
  _collector_leftmost = 0;

  // ...and adjust from there
  adjust_bounds();
}

// Tighten the cached bounds inward until they land on set bits (or collapse
// when a view is empty).
void ShenandoahFreeSet::adjust_bounds() {
  // Rewind both mutator bounds until the next bit.
  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
    _mutator_leftmost++;
  }
  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
    _mutator_rightmost--;
  }
  // Rewind both collector bounds until the next bit.
  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
    _collector_leftmost++;
  }
  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
    _collector_rightmost--;
  }
}

// Satisfy a humongous allocation: find and claim a run of contiguous,
// completely free regions in the mutator view. Returns NULL on failure.
HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  shenandoah_assert_heaplocked();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

  // No regions left to satisfy allocation, bye.
  if (num > mutator_count()) {
    return NULL;
  }

  // Find the continuous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning.

  size_t beg = _mutator_leftmost;
  size_t end = beg;

  while (true) {
    if (end >= _max) {
      // Hit the end, goodbye
      return NULL;
    }

    // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
    // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.
    if (!is_mutator_free(end) || !is_empty_or_trash(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  };

  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_top(r->bottom() + used_words);

    _mutator_free_bitmap.clear_bit(r->index());
  }

  // While individual regions report their true use, all humongous regions are
  // marked used in the free set.
  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);

  if (remainder != 0) {
    // Record this remainder as allocation waste
    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
  }

  // Allocated at left/rightmost? Move the bounds appropriately.
  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
    adjust_bounds();
  }
  assert_bounds();

  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}

// Trash regions count as empty for allocation purposes, because they are
// recycled on the allocation path.
bool ShenandoahFreeSet::is_empty_or_trash(ShenandoahHeapRegion *r) {
  return r->is_empty() || r->is_trash();
}

// How many bytes of this region could serve an allocation?
size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    // This would be recycled on allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
  return alloc_capacity(r) == 0;
}

// Recycle the region if it is trash, adjusting the heap-level used counter.
void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

// Eagerly recycle all trash regions, taking and releasing the heap lock per
// region so that allocating threads are not starved.
void ShenandoahFreeSet::recycle_trash() {
  // lock is not reentrable, check we don't have it
  shenandoah_assert_not_heaplocked();

  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

// Move an (empty or trash) region from the mutator view to the collector
// view, so GC can allocate into it.
void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->index();

  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
  assert(is_empty_or_trash(r), "Should not be allocated");

  _mutator_free_bitmap.clear_bit(idx);
  _collector_free_bitmap.set_bit(idx);
  _collector_leftmost = MIN2(idx, _collector_leftmost);
  _collector_rightmost = MAX2(idx, _collector_rightmost);

  // Mutator capacity shrinks; collector space is not counted in _capacity.
  _capacity -= alloc_capacity(r);

  if (touches_bounds(idx)) {
    adjust_bounds();
  }
  assert_bounds();
}

void ShenandoahFreeSet::clear() {
  shenandoah_assert_heaplocked();
  clear_internal();
}

// Reset both views to empty. Bounds are set to the "nothing free" state:
// leftmost past the end, rightmost at zero.
void ShenandoahFreeSet::clear_internal() {
  _mutator_free_bitmap.clear();
  _collector_free_bitmap.clear();
  _mutator_leftmost = _max;
  _mutator_rightmost = 0;
  _collector_leftmost = _max;
  _collector_rightmost = 0;
  _capacity = 0;
  _used = 0;
}

// Rebuild the free set from current region states: populate the mutator
// view with allocatable regions, then carve out the evacuation reserve for
// the collector view from the high end of the heap.
void ShenandoahFreeSet::rebuild() {
  shenandoah_assert_heaplocked();
  clear();

  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->is_cset(), "Shouldn't be adding those to the free set");

      // Do not add regions that would surely fail allocation
      if (has_no_alloc_capacity(region)) continue;

      _capacity += alloc_capacity(region);
      assert(_used <= _capacity, "must not use more than we have");

      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
      _mutator_free_bitmap.set_bit(idx);
    }
  }

  // Evac reserve: reserve trailing space for evacuations
  size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
    if (reserved >= to_reserve) break;

    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (_mutator_free_bitmap.at(idx) && is_empty_or_trash(region)) {
      _mutator_free_bitmap.clear_bit(idx);
      _collector_free_bitmap.set_bit(idx);
      size_t ac = alloc_capacity(region);
      _capacity -= ac;
      reserved += ac;
    }
  }

  recompute_bounds();
  assert_bounds();
}

// Log free set statistics (free space, fragmentation metrics, reserve) at
// Info level on gc+ergo.
void ShenandoahFreeSet::log_status() {
  shenandoah_assert_heaplocked();

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;
      size_t total_free_ext = 0;

      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);

          max = MAX2(max, free);

          if (r->is_empty()) {
            total_free_ext += free;
            if (last_idx + 1 == idx) {
              empty_contig++;
            } else {
              empty_contig = 1;
            }
          } else {
            empty_contig = 0;
          }

          total_used += r->used();
          total_free += free;

          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
      size_t free = capacity() - used();

      ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
               byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
               byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
               byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
      );

      ls.print("Frag: ");
      size_t frag_ext;
      if (total_free_ext > 0) {
        frag_ext = 100 - (100 * max_humongous / total_free_ext);
      } else {
        frag_ext = 0;
      }
      ls.print(SIZE_FORMAT "%% external, ", frag_ext);

      size_t frag_int;
      if (mutator_count() > 0) {
        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls.print(SIZE_FORMAT "%% internal; ", frag_int);
    }

    {
      size_t max = 0;
      size_t total_free = 0;

      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
        if (is_collector_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
        }
      }

      ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
                  byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
                  byte_size_in_proper_unit(max), proper_unit_for_byte_size(max));
    }
  }
}

// Main allocation entry point: dispatch humongous requests to the
// contiguous allocator, everything else to the single-region allocator.
HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();
  assert_bounds();

  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
               req.size(), ShenandoahHeapRegion::humongous_threshold_words());
        return NULL;
      default:
        ShouldNotReachHere();
        return NULL;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

// Best-effort estimate of the largest TLAB-worthy free chunk in the mutator
// view. Racy by design; only useful as a hint.
size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked, this method is unsafe when free set is modified.

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (index < _max && is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // It appears that no regions left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
    if (is_collector_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
}

/*
 * Internal fragmentation metric: describes how fragmented the heap regions are.
 *
 * It is derived as:
 *
 *               sum(used[i]^2, i=0..k)
 *   IF = 1 - ------------------------------
 *              C * sum(used[i], i=0..k)
 *
 * ...where k is the number of regions in computation, C is the region capacity, and
 * used[i] is the used space in the region.
 *
 * The non-linearity causes IF to be lower for the cases where the same total heap
 * used is densely packed. For example:
 *   a) Heap is completely full  => IF = 0
 *   b) Heap is half full, first 50% regions are completely full => IF = 0
 *   c) Heap is half full, each region is 50% full => IF = 1/2
 *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
 *   e) Heap is quarter full, each region is 25% full => IF = 3/4
 *   f) Heap has one small object per each region => IF =~ 1
 */
double ShenandoahFreeSet::internal_fragmentation() {
  double squared = 0;
  double linear = 0;
  int count = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      size_t used = r->used();
      squared += used * used;
      linear += used;
      count++;
    }
  }

  if (count > 0) {
    double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
    return 1 - s;
  } else {
    return 0;
  }
}

/*
 * External fragmentation metric: describes how fragmented the heap is.
 *
 * It is derived as:
 *
 *   EF = 1 - largest_contiguous_free / total_free
 *
 * For example:
 *   a) Heap is completely empty => EF = 0
 *   b) Heap is completely full => EF = 0
 *   c) Heap is first-half full => EF = 1/2
 *   d) Heap is half full, full and empty regions interleave => EF =~ 1
 */
double ShenandoahFreeSet::external_fragmentation() {
  size_t last_idx = 0;
  size_t max_contig = 0;
  size_t empty_contig = 0;

  size_t free = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->is_empty()) {
        free += ShenandoahHeapRegion::region_size_bytes();
        if (last_idx + 1 == index) {
          empty_contig++;
        } else {
          empty_contig = 1;
        }
      } else {
        empty_contig = 0;
      }

      max_contig = MAX2(max_contig, empty_contig);
      last_idx = index;
    }
  }

  if (free > 0) {
    return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
  } else {
    return 0;
  }
}

#ifdef ASSERT
// Verify the cached bounds are consistent with the bitmaps for both views.
void ShenandoahFreeSet::assert_bounds() const {
  // Performance invariants. Failing these would not break the free set, but performance
  // would suffer.
  assert (_mutator_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost,  _max);
  assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);

  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost);
  assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);

  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
  assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _mutator_rightmost);

  assert (_collector_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost,  _max);
  assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);

  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost);
  assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);

  beg_off = _collector_free_bitmap.get_next_one_offset(0);
  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
  assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _collector_rightmost);
}
#endif