
src/share/vm/gc_implementation/shenandoah/shenandoahFreeSet.cpp

rev 10540 : [backport] CollectedHeap::max_tlab_size is measured in words
rev 10562 : [backport] Report actual free size in non-verbose FreeSet status
rev 10564 : [backport] Refactor allocation path to accept ShenandoahAllocRequest tuple
rev 10565 : [backport] Elastic TLABs support for Shenandoah
rev 10569 : [backport] Hook up GCLABs to Elastic LAB support
rev 10571 : [backport] Complete liveness for recently allocated regions outside the allocation path
rev 10605 : [backport] Fix ShHeap::notify_alloc usages: it accepts words, not bytes
rev 10620 : [backport] Evac reserve: make sure GC has untouchable space to move the objects into
rev 10621 : [backport] Refactor FreeSet logging: support evac-reserve, denser printouts


  40 
  41   assert(_used <= _capacity, err_msg("must not use more than we have: used: "SIZE_FORMAT
  42                                      ", capacity: "SIZE_FORMAT", num_bytes: "SIZE_FORMAT,
  43                                      _used, _capacity, num_bytes));
  44 }
  45 
  46 bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  47   assert (idx < _max,
  48           err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
  49                   idx, _max, _mutator_leftmost, _mutator_rightmost));
  50   return _mutator_free_bitmap.at(idx);
  51 }
  52 
  53 bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  54   assert (idx < _max,
  55           err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
  56                   idx, _max, _collector_leftmost, _collector_rightmost));
  57   return _collector_free_bitmap.at(idx);
  58 }
  59 
  60 HeapWord* ShenandoahFreeSet::allocate_single(size_t word_size, ShenandoahHeap::AllocType type, bool& in_new_region) {
  61   // Scan the bitmap looking for a first fit.
  62   //
  63   // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
  64   // we would find the region to allocate in right away.
  65   //
  66   // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
  67   // go to the end. This makes application allocation faster, because we would clear lots
  68   // of regions from the beginning most of the time.
  69   //
  70   // The free set maintains mutator and collector views, and normally they allocate in their own views only,
  71   // unless we special-case stealing and mixed allocations.
  72 
  73   switch (type) {
  74     case ShenandoahHeap::_alloc_tlab:
  75     case ShenandoahHeap::_alloc_shared: {
  76 
  77       // Fast-path: try to allocate in the mutator view first:
  78       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
  79         if (is_mutator_free(idx)) {
  80           HeapWord* result = try_allocate_in(_heap->get_region(idx), word_size, type, in_new_region);
  81           if (result != NULL) {
  82             return result;
  83           }
  84         }
  85       }
  86 
  87       // Recovery: try to steal the empty region from the collector view:
  88       for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
  89         if (is_collector_free(idx)) {
  90           ShenandoahHeapRegion* r = _heap->get_region(idx);
  91           if (is_empty_or_trash(r)) {
  92             flip_to_mutator(idx);
  93             HeapWord *result = try_allocate_in(r, word_size, type, in_new_region);
  94             if (result != NULL) {
  95               return result;
  96             }
  97           }
  98         }
  99       }
 100 
 101       // Recovery: try to mix the allocation into the collector view:
 102       if (ShenandoahAllowMixedAllocs) {
 103         for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
 104           if (is_collector_free(idx)) {
 105             HeapWord* result = try_allocate_in(_heap->get_region(idx), word_size, type, in_new_region);
 106             if (result != NULL) {
 107               return result;
 108             }
 109           }
 110         }
 111       }
 112 
 113       break;
 114     }
 115     case ShenandoahHeap::_alloc_gclab:
 116     case ShenandoahHeap::_alloc_shared_gc: {
 117       // size_t is unsigned, need to dodge underflow when _leftmost = 0
 118 
 119       // Fast-path: try to allocate in the collector view first:
 120       for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
 121         size_t idx = c - 1;
 122         if (is_collector_free(idx)) {
 123           HeapWord* result = try_allocate_in(_heap->get_region(idx), word_size, type, in_new_region);
 124           if (result != NULL) {
 125             return result;
 126           }
 127         }
 128       }
 129 
 130       // Recovery: try to steal the empty region from the mutator view:





 131       for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
 132         size_t idx = c - 1;
 133         if (is_mutator_free(idx)) {
 134           ShenandoahHeapRegion* r = _heap->get_region(idx);
 135           if (is_empty_or_trash(r)) {
 136             flip_to_gc(idx);
 137             HeapWord *result = try_allocate_in(r, word_size, type, in_new_region);
 138             if (result != NULL) {
 139               return result;
 140             }
 141           }
 142         }
 143       }
 144 
 145       // Recovery: try to mix the allocation into the mutator view:
 146       if (ShenandoahAllowMixedAllocs) {
 147         for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
 148           size_t idx = c - 1;
 149           if (is_mutator_free(idx)) {
 150             HeapWord* result = try_allocate_in(_heap->get_region(idx), word_size, type, in_new_region);
 151             if (result != NULL) {
 152               return result;
 153             }
 154           }
 155         }
 156       }
 157 
 158       break;
 159     }
 160     default:
 161       ShouldNotReachHere();
 162   }
 163 
 164   return NULL;
 165 }
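
For illustration of the biased scan described in the comment at the top of allocate_single: the snippet below is a minimal standalone sketch, with simplified types and hypothetical names rather than the actual Shenandoah classes. Mutator allocations walk the free bitmap left to right, GC allocations walk it right to left, and cached leftmost/rightmost bounds keep both walks short.

    #include <cstddef>
    #include <vector>

    struct ToyFreeSet {
      std::vector<bool> free_bitmap;   // one bit per region
      size_t leftmost;                 // cached index of the lowest set bit
      size_t rightmost;                // cached index of the highest set bit

      // Left-to-right first fit: models the mutator fast-path.
      long first_fit_from_left() const {
        for (size_t idx = leftmost; idx <= rightmost; idx++) {
          if (free_bitmap[idx]) return (long) idx;
        }
        return -1; // nothing free in the cached window
      }

      // Right-to-left first fit: models the collector fast-path. Counting with
      // "c = rightmost + 1" dodges size_t underflow when leftmost == 0,
      // mirroring the loop shape in the real code.
      long first_fit_from_right() const {
        for (size_t c = rightmost + 1; c > leftmost; c--) {
          size_t idx = c - 1;
          if (free_bitmap[idx]) return (long) idx;
        }
        return -1;
      }
    };
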
 166 
 167 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, size_t word_size, ShenandoahHeap::AllocType type, bool& in_new_region) {
 168   assert (!has_no_alloc_capacity(r), err_msg("Performance: should avoid full regions on this path: " SIZE_FORMAT, r->region_number()));
 169 
 170   try_recycle_trashed(r);
 171 
 172   in_new_region = r->is_empty();
 173 
 174   HeapWord* result = r->allocate(word_size, type);














 175 
 176   if (result != NULL) {
 177     // Allocation successful, bump live data stats:
 178     r->increase_live_data_alloc_words(word_size);
 179     increase_used(word_size * HeapWordSize);




 180   }
 181 
 182   if (result == NULL || has_no_alloc_capacity(r)) {
 183     // Region cannot afford this or future allocations. Retire it.
 184     //
 185     // While this seems a bit harsh, especially when this large allocation does not
 186     // fit but the next small one would, we risk inflating scan times when lots of
 187     // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
 188     // TODO: Record first fully-empty region, and use that for large allocations
 189 
 190     // Record the remainder as allocation waste

 191     size_t waste = r->free();
 192     if (waste > 0) {
 193       increase_used(waste);
 194       _heap->notify_alloc(waste, true);

 195     }
 196 
 197     size_t num = r->region_number();
 198     _collector_free_bitmap.clear_bit(num);
 199     _mutator_free_bitmap.clear_bit(num);
 200     // Touched the bounds? Need to update:
 201     if (touches_bounds(num)) {
 202       adjust_bounds();
 203     }
 204     assert_bounds();
 205   }
 206   return result;
 207 }
 208 
 209 bool ShenandoahFreeSet::touches_bounds(size_t num) const {
 210   return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
 211 }
 212 











 213 void ShenandoahFreeSet::adjust_bounds() {
 214   // Rewind both mutator bounds until the next bit.
 215   while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
 216     _mutator_leftmost++;
 217   }
 218   while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
 219     _mutator_rightmost--;
 220   }
 221   // Rewind both collector bounds until the next bit.
 222   while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
 223     _collector_leftmost++;
 224   }
 225   while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
 226     _collector_rightmost--;
 227   }
 228 }
 229 
 230 HeapWord* ShenandoahFreeSet::allocate_contiguous(size_t words_size) {
 231   assert_heaplock_owned_by_current_thread();
 232 

 233   size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 234 
 235   // No regions left to satisfy allocation, bye.
 236   if (num > mutator_count()) {
 237     return NULL;
 238   }
 239 
 240   // Find the continuous interval of $num regions, starting from $beg and ending in $end,
 241   // inclusive. Contiguous allocations are biased to the beginning.
 242 
 243   size_t beg = _mutator_leftmost;
 244   size_t end = beg;
 245 
 246   while (true) {
 247     if (end >= _max) {
 248       // Hit the end, goodbye
 249       return NULL;
 250     }
 251 
 252     // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.


 275     assert(i == beg || _heap->get_region(i-1)->region_number() + 1 == r->region_number(), "Should be contiguous");
 276     assert(r->is_empty(), "Should be empty");
 277 
 278     if (i == beg) {
 279       r->make_humongous_start();
 280     } else {
 281       r->make_humongous_cont();
 282     }
 283 
 284     // Trailing region may be non-full, record the remainder there
 285     size_t used_words;
 286     if ((i == end) && (remainder != 0)) {
 287       used_words = remainder;
 288     } else {
 289       used_words = ShenandoahHeapRegion::region_size_words();
 290     }
 291 
 292     r->set_top(r->bottom() + used_words);
 293     r->reset_alloc_metadata_to_shared();
 294 
 295     r->increase_live_data_alloc_words(used_words);
 296 
 297     _mutator_free_bitmap.clear_bit(r->region_number());
 298   }
 299 
 300   // While individual regions report their true use, all humongous regions are
 301   // marked used in the free set.
 302   increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
 303 
 304   if (remainder != 0) {
 305     // Record this remainder as allocation waste
 306     _heap->notify_alloc(ShenandoahHeapRegion::region_size_words() - remainder, true);
 307   }
 308 
 309   // Allocated at left/rightmost? Move the bounds appropriately.
 310   if (beg == _mutator_leftmost || end == _mutator_rightmost) {
 311     adjust_bounds();
 312   }
 313   assert_bounds();
 314 

 315   return _heap->get_region(beg)->bottom();
 316 }
 317 
 318 bool ShenandoahFreeSet::is_empty_or_trash(ShenandoahHeapRegion *r) {
 319   return r->is_empty() || r->is_trash();
 320 }
 321 
 322 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
 323   if (r->is_trash()) {
  324     // This would be recycled on the allocation path
 325     return ShenandoahHeapRegion::region_size_bytes();
 326   } else {
 327     return r->free();
 328   }
 329 }
 330 
 331 bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
 332   return alloc_capacity(r) == 0;
 333 }
 334 


 336   if (r->is_trash()) {
 337     _heap->decrease_used(r->used());
 338     r->recycle();
 339   }
 340 }
 341 
 342 void ShenandoahFreeSet::recycle_trash() {
  343   // lock is not reentrant, check that we don't hold it
 344   assert_heaplock_not_owned_by_current_thread();
 345 
 346   for (size_t i = 0; i < _heap->num_regions(); i++) {
 347     ShenandoahHeapRegion* r = _heap->get_region(i);
 348     if (r->is_trash()) {
 349       ShenandoahHeapLocker locker(_heap->lock());
 350       try_recycle_trashed(r);
 351     }
 352     SpinPause(); // allow allocators to take the lock
 353   }
 354 }
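
The loop above takes the heap lock once per trashed region rather than once for the whole sweep, so allocating threads can interleave. The sketch below shows the same pattern with std::mutex standing in for the heap lock (simplified types and hypothetical names; not the real ShenandoahHeapLocker):

    #include <mutex>
    #include <vector>

    struct ToyRegion {
      bool trash;
      void recycle() { trash = false; }
    };

    void recycle_trash_sketch(std::vector<ToyRegion>& regions, std::mutex& heap_lock) {
      for (ToyRegion& r : regions) {
        if (r.trash) {
          std::lock_guard<std::mutex> guard(heap_lock); // short critical section per region
          if (r.trash) {                                // re-check under the lock
            r.recycle();
          }
        }
        // The real code issues a SpinPause() here to let allocators take the lock.
      }
    }
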
 355 
 356 void ShenandoahFreeSet::flip_to_gc(size_t idx) {





 357   _mutator_free_bitmap.clear_bit(idx);
 358   _collector_free_bitmap.set_bit(idx);
 359   _collector_leftmost = MIN2(idx, _collector_leftmost);
 360   _collector_rightmost = MAX2(idx, _collector_rightmost);
 361   if (touches_bounds(idx)) {
 362     adjust_bounds();
 363   }
 364   assert_bounds();
 365 }
 366 
 367 void ShenandoahFreeSet::flip_to_mutator(size_t idx) {
 368   _collector_free_bitmap.clear_bit(idx);
 369   _mutator_free_bitmap.set_bit(idx);
 370   _mutator_leftmost = MIN2(idx, _mutator_leftmost);
 371   _mutator_rightmost = MAX2(idx, _mutator_rightmost);
 372   if (touches_bounds(idx)) {
 373     adjust_bounds();
 374   }
 375   assert_bounds();
 376 }
 377 
 378 void ShenandoahFreeSet::clear() {
 379   assert_heaplock_owned_by_current_thread();
 380   clear_internal();
 381 }
 382 
 383 void ShenandoahFreeSet::clear_internal() {
 384   _mutator_free_bitmap.clear();
 385   _collector_free_bitmap.clear();
 386   _mutator_leftmost = _max;
 387   _mutator_rightmost = 0;
 388   _collector_leftmost = _max;
 389   _collector_rightmost = 0;
 390   _capacity = 0;
 391   _used = 0;
 392 }
 393 
 394 void ShenandoahFreeSet::rebuild() {
 395   assert_heaplock_owned_by_current_thread();
 396   clear();
 397 
 398   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
 399     ShenandoahHeapRegion* region = _heap->get_region(idx);
 400     if (region->is_alloc_allowed() || region->is_trash()) {
 401       assert(!region->in_collection_set(), "Shouldn't be adding those to the free set");
 402 
 403       // Do not add regions that would surely fail allocation
 404       if (has_no_alloc_capacity(region)) continue;
 405 
 406       _capacity += alloc_capacity(region);
 407       assert(_used <= _capacity, "must not use more than we have");
 408 
 409       assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
 410       _mutator_free_bitmap.set_bit(idx);
 411       _mutator_leftmost  = MIN2(_mutator_leftmost, idx);
 412       _mutator_rightmost = MAX2(_mutator_rightmost, idx);
 413     }
 414   }
 415 


















 416   assert_bounds();
 417   log_status();
 418 }
 419 
 420 void ShenandoahFreeSet::log_status() {
 421   log_info(gc, ergo)("Free: " SIZE_FORMAT "M, Regions: " SIZE_FORMAT " mutator, " SIZE_FORMAT " collector",
 422                      capacity() / M, mutator_count(), collector_count());
 423 }
 424 
 425 void ShenandoahFreeSet::log_status_verbose() {
 426   assert_heaplock_owned_by_current_thread();
 427 
 428   if (ShenandoahLogInfo || PrintGCDetails) {
 429     ResourceMark rm;
 430     outputStream* ls = gclog_or_tty;
 431 
 432     ls->print_cr("Free set: Used: " SIZE_FORMAT "M of " SIZE_FORMAT "M, Regions: " SIZE_FORMAT " mutator, " SIZE_FORMAT " collector",
 433                 used() / M, capacity() / M, mutator_count(), collector_count());
 434 
 435     {
 436       ls->print("Free set: Mutator view: ");
 437 
 438       size_t last_idx = 0;
 439       size_t max = 0;
 440       size_t max_contig = 0;
 441       size_t empty_contig = 0;
 442 
 443       size_t total_used = 0;
 444       size_t total_free = 0;
 445 
 446       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
 447         if (is_mutator_free(idx)) {
 448           ShenandoahHeapRegion *r = _heap->get_region(idx);
 449           size_t free = r->free();
 450 
 451           max = MAX2(max, free);
 452 
 453           if (r->is_empty() && (last_idx + 1 == idx)) {
 454             empty_contig++;
 455           } else {
 456             empty_contig = 0;
 457           }
 458 
 459           total_used += r->used();
 460           total_free += free;
 461 
 462           max_contig = MAX2(max_contig, empty_contig);
 463           last_idx = idx;
 464         }
 465       }
 466 
 467       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
 468       size_t free = capacity() - used();
 469 
 470       ls->print("Max regular: " SIZE_FORMAT "K, Max humongous: " SIZE_FORMAT "K, ", max / K, max_humongous / K);

 471 
 472       size_t frag_ext;
 473       if (free > 0) {
 474         frag_ext = 100 - (100 * max_humongous / free);
 475       } else {
 476         frag_ext = 0;
 477       }
 478       ls->print("External frag: " SIZE_FORMAT "%%, ", frag_ext);
 479 
 480       size_t frag_int;
 481       if (mutator_count() > 0) {
 482         frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
 483       } else {
 484         frag_int = 0;
 485       }
 486       ls->print("Internal frag: " SIZE_FORMAT "%%", frag_int);
 487       ls->cr();
 488     }
 489 
 490     {
 491       ls->print("Free set: Collector view: ");
 492 
 493       size_t max = 0;


 494       for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
 495         if (is_collector_free(idx)) {
 496           ShenandoahHeapRegion *r = _heap->get_region(idx);
 497           max = MAX2(max, r->free());


 498         }
 499       }
 500 
 501       ls->print("Max regular: " SIZE_FORMAT "K", max / K);
 502       ls->cr();
 503     }
 504   }
 505 }
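
To make the two fragmentation figures above concrete (numbers purely illustrative): with free = 100M and a largest contiguous empty run of max_humongous = 40M, external fragmentation prints 100 - 100*40/100 = 60%; with mutator regions averaging 1M used out of a 4M region size, internal fragmentation prints 100*1/4 = 25%.
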
 506 
 507 HeapWord* ShenandoahFreeSet::allocate(size_t word_size, ShenandoahHeap::AllocType type, bool& in_new_region) {
 508   assert_heaplock_owned_by_current_thread();
 509   assert_bounds();
 510 
 511   if (word_size > ShenandoahHeapRegion::humongous_threshold_words()) {
 512     switch (type) {
 513       case ShenandoahHeap::_alloc_shared:
 514       case ShenandoahHeap::_alloc_shared_gc:
 515         in_new_region = true;
 516         return allocate_contiguous(word_size);
 517       case ShenandoahHeap::_alloc_gclab:
 518       case ShenandoahHeap::_alloc_tlab:
 519         in_new_region = false;
 520         log_warning(gc)("Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
 521                         word_size, ShenandoahHeapRegion::humongous_threshold_words());
 522         return NULL;
 523       default:
 524         ShouldNotReachHere();
 525         return NULL;
 526     }
 527   } else {
 528     return allocate_single(word_size, type, in_new_region);
 529   }
 530 }
 531 
 532 size_t ShenandoahFreeSet::unsafe_peek_free() const {
  533   // Deliberately not locked; this method is unsafe when the free set is being modified.
 534 
 535   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 536     if (index < _max && is_mutator_free(index)) {
 537       ShenandoahHeapRegion* r = _heap->get_region(index);
 538       if (r->free() >= MinTLABSize) {
 539         return r->free();
 540       }
 541     }
 542   }
 543 
  544   // It appears that no regions are left
 545   return 0;
 546 }
 547 
 548 void ShenandoahFreeSet::print_on(outputStream* out) const {




  40 
  41   assert(_used <= _capacity, err_msg("must not use more than we have: used: "SIZE_FORMAT
  42                                      ", capacity: "SIZE_FORMAT", num_bytes: "SIZE_FORMAT,
  43                                      _used, _capacity, num_bytes));
  44 }
  45 
  46 bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  47   assert (idx < _max,
  48           err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
  49                   idx, _max, _mutator_leftmost, _mutator_rightmost));
  50   return _mutator_free_bitmap.at(idx);
  51 }
  52 
  53 bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  54   assert (idx < _max,
  55           err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
  56                   idx, _max, _collector_leftmost, _collector_rightmost));
  57   return _collector_free_bitmap.at(idx);
  58 }
  59 
  60 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahHeap::ShenandoahAllocationRequest& req, bool& in_new_region) {
  61   // Scan the bitmap looking for a first fit.
  62   //
  63   // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
  64   // we would find the region to allocate in right away.
  65   //
  66   // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
  67   // go to the end. This makes application allocation faster, because we would clear lots
  68   // of regions from the beginning most of the time.
  69   //
  70   // The free set maintains mutator and collector views, and normally they allocate in their own views only,
  71   // unless we special-case stealing and mixed allocations.
  72 
  73   switch (req.type()) {
  74     case ShenandoahHeap::_alloc_tlab:
  75     case ShenandoahHeap::_alloc_shared: {
  76 
  77       // Try to allocate in the mutator view
  78       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
  79         if (is_mutator_free(idx)) {
  80           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
  81           if (result != NULL) {
  82             return result;
  83           }
  84         }
  85       }
  86 
  87       // There is no recovery. The mutator does not touch the collector view at all.

























  88       break;
  89     }
  90     case ShenandoahHeap::_alloc_gclab:
  91     case ShenandoahHeap::_alloc_shared_gc: {
  92       // size_t is unsigned, need to dodge underflow when _leftmost = 0
  93 
  94       // Fast-path: try to allocate in the collector view first
  95       for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
  96         size_t idx = c - 1;
  97         if (is_collector_free(idx)) {
  98           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
  99           if (result != NULL) {
 100             return result;
 101           }
 102         }
 103       }
 104 
 105       // No dice. Can we borrow space from mutator view?
 106       if (!ShenandoahEvacReserveOverflow) {
 107         return NULL;
 108       }
 109 
 110       // Try to steal the empty region from the mutator view
 111       for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
 112         size_t idx = c - 1;
 113         if (is_mutator_free(idx)) {
 114           ShenandoahHeapRegion* r = _heap->get_region(idx);
 115           if (is_empty_or_trash(r)) {
 116             flip_to_gc(r);
 117             HeapWord *result = try_allocate_in(r, req, in_new_region);
 118             if (result != NULL) {
 119               return result;
 120             }
 121           }
 122         }
 123       }
 124 
 125       // Try to mix the allocation into the mutator view:
 126       if (ShenandoahAllowMixedAllocs) {
 127         for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
 128           size_t idx = c - 1;
 129           if (is_mutator_free(idx)) {
 130             HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
 131             if (result != NULL) {
 132               return result;
 133             }
 134           }
 135         }
 136       }

 137       break;
 138     }
 139     default:
 140       ShouldNotReachHere();
 141   }
 142 
 143   return NULL;
 144 }
 145 
 146 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahHeap::ShenandoahAllocationRequest& req, bool& in_new_region) {
 147   assert (!has_no_alloc_capacity(r), err_msg("Performance: should avoid full regions on this path: " SIZE_FORMAT, r->region_number()));
 148 
 149   try_recycle_trashed(r);
 150 
 151   in_new_region = r->is_empty();
 152 
 153   HeapWord* result = NULL;
 154   size_t size = req.size();
 155 
 156   if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
 157     size_t free = align_size_down(r->free() >> LogHeapWordSize, MinObjAlignment);
 158     if (size > free) {
 159       size = free;
 160     }
 161     if (size >= req.min_size()) {
 162       result = r->allocate(size, req.type());
 163       assert (result != NULL, err_msg("Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size));
 164     }
 165   } else {
 166     result = r->allocate(size, req.type());
 167   }
 168 
 169   if (result != NULL) {
 170     // Allocation successful, bump stats:
 171     if (req.is_mutator_alloc()) {
 172       increase_used(size * HeapWordSize);
 173     }
 174 
 175     // Record actual allocation size
 176     req.set_actual_size(size);
 177   }
 178 
 179   if (result == NULL || has_no_alloc_capacity(r)) {
 180     // Region cannot afford this or future allocations. Retire it.
 181     //
 182     // While this seems a bit harsh, especially when this large allocation does not
 183     // fit but the next small one would, we risk inflating scan times when lots of
 184     // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
 185     // TODO: Record first fully-empty region, and use that for large allocations
 186 
 187     // Record the remainder as allocation waste
 188     if (req.is_mutator_alloc()) {
 189       size_t waste = r->free();
 190       if (waste > 0) {
 191         increase_used(waste);
 192         _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
 193       }
 194     }
 195 
 196     size_t num = r->region_number();
 197     _collector_free_bitmap.clear_bit(num);
 198     _mutator_free_bitmap.clear_bit(num);
 199     // Touched the bounds? Need to update:
 200     if (touches_bounds(num)) {
 201       adjust_bounds();
 202     }
 203     assert_bounds();
 204   }
 205   return result;
 206 }
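
The ShenandoahElasticTLAB branch above shrinks a LAB request to whatever the region can still hold, as long as that stays at or above the request's minimum size. Below is a minimal standalone sketch of that sizing decision, with hypothetical names and word-based sizes rather than the real ShenandoahAllocRequest:

    #include <cstddef>

    static const size_t kMinAlignWords = 8; // stand-in for MinObjAlignment

    static size_t align_down(size_t words, size_t alignment) {
      return words - (words % alignment);
    }

    // Returns the LAB size (in words) to actually allocate, or 0 if the region
    // cannot satisfy even the minimum LAB size.
    size_t elastic_lab_size(size_t desired_words, size_t min_words, size_t region_free_words) {
      size_t size = desired_words;
      size_t free_words = align_down(region_free_words, kMinAlignWords);
      if (size > free_words) {
        size = free_words;             // shrink the LAB to what the region can give
      }
      return (size >= min_words) ? size : 0;
    }
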
 207 
 208 bool ShenandoahFreeSet::touches_bounds(size_t num) const {
 209   return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
 210 }
 211 
 212 void ShenandoahFreeSet::recompute_bounds() {
 213   // Reset to the most pessimistic case:
 214   _mutator_rightmost = _max - 1;
 215   _mutator_leftmost = 0;
 216   _collector_rightmost = _max - 1;
 217   _collector_leftmost = 0;
 218 
 219   // ...and adjust from there
 220   adjust_bounds();
 221 }
 222 
 223 void ShenandoahFreeSet::adjust_bounds() {
 224   // Rewind both mutator bounds until the next bit.
 225   while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
 226     _mutator_leftmost++;
 227   }
 228   while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
 229     _mutator_rightmost--;
 230   }
 231   // Rewind both collector bounds until the next bit.
 232   while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
 233     _collector_leftmost++;
 234   }
 235   while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
 236     _collector_rightmost--;
 237   }
 238 }
 239 
 240 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahHeap::ShenandoahAllocationRequest& req) {
 241   assert_heaplock_owned_by_current_thread();
 242 
 243   size_t words_size = req.size();
 244   size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 245 
 246   // No regions left to satisfy allocation, bye.
 247   if (num > mutator_count()) {
 248     return NULL;
 249   }
 250 
 251   // Find the continuous interval of $num regions, starting from $beg and ending in $end,
 252   // inclusive. Contiguous allocations are biased to the beginning.
 253 
 254   size_t beg = _mutator_leftmost;
 255   size_t end = beg;
 256 
 257   while (true) {
 258     if (end >= _max) {
 259       // Hit the end, goodbye
 260       return NULL;
 261     }
 262 
 263     // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.


 286     assert(i == beg || _heap->get_region(i-1)->region_number() + 1 == r->region_number(), "Should be contiguous");
 287     assert(r->is_empty(), "Should be empty");
 288 
 289     if (i == beg) {
 290       r->make_humongous_start();
 291     } else {
 292       r->make_humongous_cont();
 293     }
 294 
 295     // Trailing region may be non-full, record the remainder there
 296     size_t used_words;
 297     if ((i == end) && (remainder != 0)) {
 298       used_words = remainder;
 299     } else {
 300       used_words = ShenandoahHeapRegion::region_size_words();
 301     }
 302 
 303     r->set_top(r->bottom() + used_words);
 304     r->reset_alloc_metadata_to_shared();
 305 


 306     _mutator_free_bitmap.clear_bit(r->region_number());
 307   }
 308 
 309   // While individual regions report their true use, all humongous regions are
 310   // marked used in the free set.
 311   increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
 312 
 313   if (remainder != 0) {
 314     // Record this remainder as allocation waste
 315     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
 316   }
 317 
 318   // Allocated at left/rightmost? Move the bounds appropriately.
 319   if (beg == _mutator_leftmost || end == _mutator_rightmost) {
 320     adjust_bounds();
 321   }
 322   assert_bounds();
 323 
 324   req.set_actual_size(words_size);
 325   return _heap->get_region(beg)->bottom();
 326 }
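
For context on allocate_contiguous: it first converts the request into a whole number of regions and then looks for that many adjacent free regions, biased toward the start of the heap, with only the trailing region possibly left partially used. The helpers below are a standalone sketch of those two steps (hypothetical names, not the real ShenandoahHeapRegion or free-set API):

    #include <cstddef>
    #include <vector>

    // Round the byte size up to a whole number of regions.
    size_t required_regions(size_t bytes, size_t region_size_bytes) {
      return (bytes + region_size_bytes - 1) / region_size_bytes;
    }

    // Return the starting index of the first run of `num` consecutive free
    // regions, or -1 if no such run exists.
    long find_contiguous(const std::vector<bool>& mutator_free, size_t num) {
      size_t run = 0;
      for (size_t idx = 0; idx < mutator_free.size(); idx++) {
        run = mutator_free[idx] ? run + 1 : 0;
        if (run == num) {
          return (long)(idx + 1 - num); // run ends at idx, so it began num-1 earlier
        }
      }
      return -1;
    }
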
 327 
 328 bool ShenandoahFreeSet::is_empty_or_trash(ShenandoahHeapRegion *r) {
 329   return r->is_empty() || r->is_trash();
 330 }
 331 
 332 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
 333   if (r->is_trash()) {
 334     // This would be recycled on the allocation path
 335     return ShenandoahHeapRegion::region_size_bytes();
 336   } else {
 337     return r->free();
 338   }
 339 }
 340 
 341 bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
 342   return alloc_capacity(r) == 0;
 343 }
 344 


 346   if (r->is_trash()) {
 347     _heap->decrease_used(r->used());
 348     r->recycle();
 349   }
 350 }
 351 
 352 void ShenandoahFreeSet::recycle_trash() {
 353   // lock is not reentrant, check that we don't hold it
 354   assert_heaplock_not_owned_by_current_thread();
 355 
 356   for (size_t i = 0; i < _heap->num_regions(); i++) {
 357     ShenandoahHeapRegion* r = _heap->get_region(i);
 358     if (r->is_trash()) {
 359       ShenandoahHeapLocker locker(_heap->lock());
 360       try_recycle_trashed(r);
 361     }
 362     SpinPause(); // allow allocators to take the lock
 363   }
 364 }
 365 
 366 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
 367   size_t idx = r->region_number();
 368 
 369   assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
 370   assert(is_empty_or_trash(r), "Should not be allocated");
 371 
 372   _mutator_free_bitmap.clear_bit(idx);
 373   _collector_free_bitmap.set_bit(idx);
 374   _collector_leftmost = MIN2(idx, _collector_leftmost);
 375   _collector_rightmost = MAX2(idx, _collector_rightmost);





 376 
 377   _capacity -= alloc_capacity(r);
 378 



 379   if (touches_bounds(idx)) {
 380     adjust_bounds();
 381   }
 382   assert_bounds();
 383 }
 384 
 385 void ShenandoahFreeSet::clear() {
 386   assert_heaplock_owned_by_current_thread();
 387   clear_internal();
 388 }
 389 
 390 void ShenandoahFreeSet::clear_internal() {
 391   _mutator_free_bitmap.clear();
 392   _collector_free_bitmap.clear();
 393   _mutator_leftmost = _max;
 394   _mutator_rightmost = 0;
 395   _collector_leftmost = _max;
 396   _collector_rightmost = 0;
 397   _capacity = 0;
 398   _used = 0;
 399 }
 400 
 401 void ShenandoahFreeSet::rebuild() {
 402   assert_heaplock_owned_by_current_thread();
 403   clear();
 404 
 405   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
 406     ShenandoahHeapRegion* region = _heap->get_region(idx);
 407     if (region->is_alloc_allowed() || region->is_trash()) {
 408       assert(!region->in_collection_set(), "Shouldn't be adding those to the free set");
 409 
 410       // Do not add regions that would surely fail allocation
 411       if (has_no_alloc_capacity(region)) continue;
 412 
 413       _capacity += alloc_capacity(region);
 414       assert(_used <= _capacity, "must not use more than we have");
 415 
 416       assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
 417       _mutator_free_bitmap.set_bit(idx);


 418     }
 419   }
 420 
 421   // Evac reserve: reserve trailing space for evacuations
 422   size_t to_reserve = ShenandoahEvacReserve * _heap->capacity() / 100;
 423   size_t reserved = 0;
 424 
 425   for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
 426     if (reserved >= to_reserve) break;
 427 
 428     ShenandoahHeapRegion* region = _heap->get_region(idx);
 429     if (_mutator_free_bitmap.at(idx) && is_empty_or_trash(region)) {
 430       _mutator_free_bitmap.clear_bit(idx);
 431       _collector_free_bitmap.set_bit(idx);
 432       size_t ac = alloc_capacity(region);
 433       _capacity -= ac;
 434       reserved += ac;
 435     }
 436   }
 437 
 438   recompute_bounds();
 439   assert_bounds();

 440 }
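
The rebuild above ends by setting aside an evacuation reserve: starting from the top of the heap, empty mutator-free regions are moved into the collector view until roughly ShenandoahEvacReserve percent of heap capacity is reserved, so the GC always has untouchable space to evacuate into. A simplified standalone sketch of that step (hypothetical names and types, not the real ShenandoahFreeSet):

    #include <cstddef>
    #include <vector>

    size_t reserve_for_evac(std::vector<bool>& mutator_free,
                            std::vector<bool>& collector_free,
                            const std::vector<bool>& region_is_empty,
                            size_t region_size_bytes,
                            size_t heap_capacity_bytes,
                            size_t evac_reserve_percent) {
      size_t to_reserve = evac_reserve_percent * heap_capacity_bytes / 100;
      size_t reserved = 0;
      // Walk top-down so the reserve lands at the high end of the heap.
      for (size_t idx = mutator_free.size(); idx > 0 && reserved < to_reserve; idx--) {
        size_t i = idx - 1;
        if (mutator_free[i] && region_is_empty[i]) {
          mutator_free[i] = false;    // leave the mutator view...
          collector_free[i] = true;   // ...and join the collector view
          reserved += region_size_bytes;
        }
      }
      return reserved; // bytes actually set aside for GC evacuations
    }
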
 441 
 442 void ShenandoahFreeSet::log_status() {





 443   assert_heaplock_owned_by_current_thread();
 444 
 445   if (ShenandoahLogInfo || PrintGCDetails) {
 446     ResourceMark rm;
 447     outputStream* ls = gclog_or_tty;
 448 



 449     {


 450       size_t last_idx = 0;
 451       size_t max = 0;
 452       size_t max_contig = 0;
 453       size_t empty_contig = 0;
 454 
 455       size_t total_used = 0;
 456       size_t total_free = 0;
 457 
 458       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
 459         if (is_mutator_free(idx)) {
 460           ShenandoahHeapRegion *r = _heap->get_region(idx);
 461           size_t free = alloc_capacity(r);
 462 
 463           max = MAX2(max, free);
 464 
 465           if (r->is_empty() && (last_idx + 1 == idx)) {
 466             empty_contig++;
 467           } else {
 468             empty_contig = 0;
 469           }
 470 
 471           total_used += r->used();
 472           total_free += free;
 473 
 474           max_contig = MAX2(max_contig, empty_contig);
 475           last_idx = idx;
 476         }
 477       }
 478 
 479       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
 480       size_t free = capacity() - used();
 481 
 482       ls->print("Free: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K, Max humongous: " SIZE_FORMAT "K, ",
 483                total_free / M, mutator_count(), max / K, max_humongous / K);
 484 
 485       size_t frag_ext;
 486       if (free > 0) {
 487         frag_ext = 100 - (100 * max_humongous / free);
 488       } else {
 489         frag_ext = 0;
 490       }
 491       ls->print("External frag: " SIZE_FORMAT "%%, ", frag_ext);
 492 
 493       size_t frag_int;
 494       if (mutator_count() > 0) {
 495         frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
 496       } else {
 497         frag_int = 0;
 498       }
 499       ls->print("Internal frag: " SIZE_FORMAT "%%", frag_int);
 500       ls->cr();
 501     }
 502 
 503     {


 504       size_t max = 0;
 505       size_t total_free = 0;
 506 
 507       for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
 508         if (is_collector_free(idx)) {
 509           ShenandoahHeapRegion *r = _heap->get_region(idx);
 510           size_t free = alloc_capacity(r);
 511           max = MAX2(max, free);
 512           total_free += free;
 513         }
 514       }
 515 
 516       ls->print_cr("Evacuation Reserve: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K",
 517                   total_free / M, collector_count(), max / K);
 518     }
 519   }
 520 }
 521 
 522 HeapWord* ShenandoahFreeSet::allocate(ShenandoahHeap::ShenandoahAllocationRequest& req, bool& in_new_region) {
 523   assert_heaplock_owned_by_current_thread();
 524   assert_bounds();
 525 
 526   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
 527     switch (req.type()) {
 528       case ShenandoahHeap::_alloc_shared:
 529       case ShenandoahHeap::_alloc_shared_gc:
 530         in_new_region = true;
 531         return allocate_contiguous(req);
 532       case ShenandoahHeap::_alloc_gclab:
 533       case ShenandoahHeap::_alloc_tlab:
 534         in_new_region = false;
 535         assert(false, err_msg("Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
 536                               req.size(), ShenandoahHeapRegion::humongous_threshold_words()));
 537         return NULL;
 538       default:
 539         ShouldNotReachHere();
 540         return NULL;
 541     }
 542   } else {
 543     return allocate_single(req, in_new_region);
 544   }
 545 }
 546 
 547 size_t ShenandoahFreeSet::unsafe_peek_free() const {
 548   // Deliberately not locked; this method is unsafe when the free set is being modified.
 549 
 550   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 551     if (index < _max && is_mutator_free(index)) {
 552       ShenandoahHeapRegion* r = _heap->get_region(index);
 553       if (r->free() >= MinTLABSize) {
 554         return r->free();
 555       }
 556     }
 557   }
 558 
 559   // It appears that no regions are left
 560   return 0;
 561 }
 562 
 563 void ShenandoahFreeSet::print_on(outputStream* out) const {

