1 /*
   2  * Copyright (c) 2016, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
  28 
// Construct an empty free set sized for at most 'max_regions' regions.
// The backing region array is owned by the ShenandoahHeapRegionSet base
// class; the free set starts with zero capacity and zero used bytes.
ShenandoahFreeSet::ShenandoahFreeSet(size_t max_regions) :
  ShenandoahHeapRegionSet(max_regions),
  _write_index(0),
  _capacity(0),
  _used(0)
{
}
  36 
// Nothing to release here: region storage is managed by the base class.
ShenandoahFreeSet::~ShenandoahFreeSet() {
}
  39 
// Atomically account 'num_bytes' of newly allocated space against this
// free set.
// NOTE(review): the assert checks the invariant *before* the addition, so a
// call that pushes _used past _capacity is only caught by the next caller.
// _capacity is also adjusted concurrently (push_back_regions subtracts,
// par_add_regions adds back), so the check is inherently best-effort.
void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  assert(_used <= _capacity, "must not use more than we have");
  Atomic::add(num_bytes, &_used);
}
  44 
  45 size_t ShenandoahFreeSet::used() {
  46   return _used;
  47 }
  48 
  49 size_t ShenandoahFreeSet::capacity() {
  50   return _capacity;
  51 }
  52 
  53 /**
  54  * Return 0 if the range starting at start is a contiguous range with
  55  * num regions. Returns a number > 0 otherwise. That number tells
  56  * the caller, how many regions to skip (because we know, there
  57  * can't start a contiguous range there).
  58  */
  59 size_t ShenandoahFreeSet::is_contiguous(size_t start, size_t end, size_t num) {
  60 
  61   ShenandoahHeapRegion* r1 = get(start);
  62 
  63   if (! r1->is_empty()) {
  64     return 1;
  65   }
  66   for (size_t i = 1; i < num; i++) {
  67 
  68     size_t index = (start + i) % _reserved_end;
  69     if (index == end) {
  70       // We reached the end of our free list.
  71       ShouldNotReachHere(); // We limit search in find_contiguous()
  72       return i;
  73     }
  74 
  75     ShenandoahHeapRegion* r2 = get(index);
  76     if (r2->region_number() != r1->region_number() + 1)
  77       return i;
  78     if (! r2->is_empty())
  79       return i+1;
  80 
  81     r1 = r2;
  82   }
  83   return 0;
  84 }
  85 
// Scan the free list for a run of 'num' contiguous empty regions, starting
// at free-list index 'start' and never reaching 'end'. Returns the
// free-list index of the first region of the run, or SIZE_MAX if no such
// run exists in the searched window.
size_t ShenandoahFreeSet::find_contiguous(size_t start, size_t end, size_t num) {

  assert(start < _reserved_end, "sanity");

  // The modulo will take care of wrapping around.
  size_t index = start;
  // Stop once fewer than 'num' slots remain before 'end': a run can no
  // longer fit, and is_contiguous() must never step onto 'end'.
  while (index != end && diff_to_end(index, end) > num) {
    assert(index < _reserved_end, "sanity");
    size_t j = is_contiguous(index, end, num);
    if (j == 0) {
      return index;
    }
    // is_contiguous() told us how many slots cannot start a run; skip them.
    index = (index + j) % _reserved_end;
  }
  return SIZE_MAX;
}
 102 
// Re-append the regions in the half-open free-list window [start, end)
// (wrapping at _reserved_end) to the tail of the free list. Used by
// claim_contiguous() to recycle the regions it skipped while searching.
void ShenandoahFreeSet::push_back_regions(size_t start, size_t end) {
  if (start == end) return; // Nothing to do.
  for (size_t i = start; i != end; i = (i + 1) % _reserved_end) {
    ShenandoahHeapRegion* r = get(i);
    // We subtract the capacity here, and add it back in par_add_region.
    Atomic::add(-((jlong) r->free()), (jlong*) &_capacity);
  }
  par_add_regions(_regions, start, diff_to_end(start, end), _reserved_end);
}
 112 
 113 void ShenandoahFreeSet::initialize_humongous_regions(size_t first, size_t num) {
 114   for (size_t i = 0; i < num; i++) {
 115     ShenandoahHeapRegion* current = get((first + i) % _reserved_end);
 116     if (i == 0)
 117       current->set_humongous_start(true);
 118     else
 119       current->set_humongous_continuation(true);
 120 
 121     current->set_top(current->end());
 122     current->increase_live_data(ShenandoahHeapRegion::RegionSizeBytes);
 123   }
 124   increase_used(ShenandoahHeapRegion::RegionSizeBytes * num);
 125   ShenandoahHeap::heap()->increase_used(ShenandoahHeapRegion::RegionSizeBytes * num);
 126 }
 127 
 128 size_t ShenandoahFreeSet::diff_to_end(size_t i, size_t end) const {
 129   if (end <= i) {
 130     end += _reserved_end;
 131   }
 132   assert(end > i, "sanity");
 133   return end == i ? _capacity : end - i;
 134 }
 135 
// Claim 'num' physically contiguous empty regions from the free list and
// initialize them as one humongous object. Returns the first (humongous
// start) region, or NULL if no suitable run is available. Lock-free:
// competing claimers race to advance _current_index with CAS; a loser
// adopts the published index and retries while its candidate run still
// lies ahead of the new scan position.
ShenandoahHeapRegion* ShenandoahFreeSet::claim_contiguous(size_t num) {
  size_t current_idx = _current_index;
  // Searching starts just past the current allocation index.
  size_t next = (current_idx + 1) % _reserved_end;
  size_t end = _active_end;
  while (next != end && diff_to_end(next, end) > num) {
    size_t first = find_contiguous(next, end, num);
    if (first == SIZE_MAX) return NULL;
    // On success, _current_index moves to just past the claimed run.
    size_t next_current = (first + num) % _reserved_end;
    assert(next_current != end, "never set current==end");
    do {
      size_t result = (size_t) Atomic::cmpxchg((jlong) next_current, (jlong*) &_current_index, (jlong) current_idx);
      if (result == current_idx) {
        // We won the race. Regions we skipped over while searching go
        // back to the tail of the free list.
        push_back_regions(next, first);

        initialize_humongous_regions(first, num);
        assert(current_index() != first, "current overlaps with contiguous regions");
        return get(first);
      }
      // Lost the race: another thread advanced _current_index. Adopt its
      // value and retry while our candidate run is still ahead of it.
      current_idx = result;
      assert(current_idx != end, "must not cross active-end");
      next = (current_idx + 1) % _reserved_end;
      end = _active_end;
    } while (diff_to_end(current_idx, end) > diff_to_end(first, end));
  }
  return NULL;
}
 164 
// Empty the free set: collapse the active window down to the current
// allocation index and zero the accounting. Regions are expected to be
// re-added afterwards (see par_add_regions / add_region).
void ShenandoahFreeSet::clear() {
  _active_end = _current_index;
  _write_index = _current_index;
  _capacity = 0;
  _used = 0;
}
 171 
// Append 'num' regions, read from 'regions' starting at index 'start'
// (wrapping at 'max'), to the tail of this free set. Safe for concurrent
// callers: each caller first reserves its window of slots by atomically
// bumping _write_index, fills the window, then publishes it by advancing
// _active_end — which forces publication in reservation (FIFO) order.
void ShenandoahFreeSet::par_add_regions(ShenandoahHeapRegion** regions, size_t start, size_t num, size_t max) {

  // Reserve [bottom, next) in the free list. _write_index itself grows
  // monotonically; slot positions wrap via modulo.
  size_t next = Atomic::add(num, &_write_index);
  assert(next >= num, "don't get negative");
  size_t bottom = (next - num) % _reserved_end;
  next = next % _reserved_end;
  assert(bottom != next, "must be");

  // Copy the regions into the reserved window, summing their free space.
  size_t capacity = 0;
  for (size_t i = 0; i < num; i++) {
    ShenandoahHeapRegion* r = regions[(start + i) % max];
    capacity += r->free();
    _regions[(bottom + i) % _reserved_end] = r;
  }

  // loop until we succeed in bringing the active_end up to our
  // write index
  // active_end gets set to 0 when we start a full gc
  while (true) {
    // The CAS only succeeds once every earlier reserver has published its
    // own window, i.e. once _active_end has caught up to our 'bottom'.
    size_t test = (size_t) Atomic::cmpxchg((jlong) next, (jlong*) &_active_end, (jlong) bottom);
    if (test == bottom) {
      Atomic::add(capacity, &_capacity);
      return;
    } else {
      // Don't starve competing threads.
      os::naked_yield();
    }
  }

}
 202 
// Append a single region to the free set. Indices are updated without
// atomics, so this is for single-threaded use only (the asserts also
// assume _active_end has not wrapped).
void ShenandoahFreeSet::add_region(ShenandoahHeapRegion* r) {
  assert(!r->is_in_collection_set(), "Shouldn't be adding those to the free set");
  assert(!contains(r), "We are about to add it, it shouldn't be there already");
  assert(!r->is_humongous(), "Don't add to humongous regions");

  assert(_active_end < _reserved_end, "within bounds and no wrapping here");

  _regions[_active_end] = r;
  _active_end = (_active_end + 1) % _reserved_end;
  // Keep the write index in step with the published end.
  _write_index++;
  _capacity += r->free();
  assert(_used <= _capacity, "must not use more than we have");
}
 216 
// Try to advance _current_index from 'idx' to the following free-list slot.
// Returns the new current index on success, or — if another thread won the
// CAS — the value that thread installed (either way, the index to continue
// from). Returns SIZE_MAX when the next slot would reach _active_end.
size_t ShenandoahFreeSet::par_claim_next(size_t idx) {
  size_t next = (idx + 1) % _reserved_end;
  if (next == _active_end) {
    // Don't increase _current_index up to _active_end.
    return SIZE_MAX;
  }
  size_t result = (size_t) Atomic::cmpxchg((jlong) next, (jlong*) &_current_index, (jlong) idx);

  if (result == idx) {
    // CAS succeeded: 'next' is now the current index.
    result = next;
  }
  assert (result != _active_end, "don't increase current into active_end");
  return result;
}