/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "memory/resourceArea.hpp"
#include "utilities/quickSort.hpp"

ShenandoahHeapRegionSet::ShenandoahHeapRegionSet(size_t max_regions) :
  _max_regions(max_regions),
  _regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
  _garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2),
  _free_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2),
  _available(0), _used(0)
{
  _next = &_regions[0];
  _current = NULL;
  _next_free = &_regions[0];
}

ShenandoahHeapRegionSet::ShenandoahHeapRegionSet(size_t max_regions, ShenandoahHeapRegion** regions, size_t num_regions) :
  _max_regions(num_regions),
  _regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
  _garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2),
  _free_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2),
  _available(0), _used(0) {

  // Make a copy of the regions array so that we can sort without destroying the original.
  memcpy(_regions, regions, sizeof(ShenandoahHeapRegion*) * num_regions);

  _next = &_regions[0];
  _current = NULL;
  _next_free = &_regions[num_regions];
}

ShenandoahHeapRegionSet::~ShenandoahHeapRegionSet() {
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegion*, _regions);
}

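// Sort helper: order regions by decreasing garbage so that the most
// profitable regions come first; NULL entries sort after all non-NULL ones.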
int compareHeapRegionsByGarbage(ShenandoahHeapRegion* a, ShenandoahHeapRegion* b) {
  if (a == NULL) {
    if (b == NULL) {
      return 0;
    } else {
      return 1;
    }
  } else if (b == NULL) {
    return -1;
  }

  size_t garbage_a = a->garbage();
  size_t garbage_b = b->garbage();

  if (garbage_a > garbage_b) {
    return -1;
  } else if (garbage_a < garbage_b) {
    return 1;
  } else {
    return 0;
  }
}

ShenandoahHeapRegion* ShenandoahHeapRegionSet::current() {
  ShenandoahHeapRegion** current = _current;
  if (current == NULL) {
    return get_next();
  } else {
    return *(limit_region(current));
  }
}

size_t ShenandoahHeapRegionSet::length() {
  return _next_free - _regions;
}

size_t ShenandoahHeapRegionSet::available_regions() {
  return (_regions + _max_regions) - _next_free;
}

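// Append is single-threaded: it may only be called at a safepoint, under the
// heap lock, or during VM initialization (see the assert below).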
void ShenandoahHeapRegionSet::append(ShenandoahHeapRegion* region) {
  assert(_next_free < _regions + _max_regions, "need space for additional regions");
  assert(SafepointSynchronize::is_at_safepoint() || ShenandoahHeap_lock->owned_by_self() ||
         ! Universe::is_fully_initialized(),
         "only append regions to list while world is stopped");

  // Grab next slot.
  ShenandoahHeapRegion** next_free = _next_free;
  _next_free++;

  // Insert new region into slot.
  *next_free = region;

  _available += region->free();
}

void ShenandoahHeapRegionSet::clear() {
  _current = NULL;
  _next = _regions;
  _next_free = _regions;
  _available = 0;
  _used = 0;
}

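// Multiple GC threads may call claim_next() concurrently. Atomic::add_ptr
// returns the updated value of _next, so stepping back one slot yields the
// slot this thread just claimed.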
ShenandoahHeapRegion* ShenandoahHeapRegionSet::claim_next() {
  ShenandoahHeapRegion** next = (ShenandoahHeapRegion**) Atomic::add_ptr(sizeof(ShenandoahHeapRegion**), &_next);
  next--;
  if (next < _next_free) {
    return *next;
  } else {
    return NULL;
  }
}

ShenandoahHeapRegion* ShenandoahHeapRegionSet::get_next() {
  ShenandoahHeapRegion** next = _next;
  if (next < _next_free) {
    _current = next;
    _next++;
    return *next;
  } else {
    return NULL;
  }
}

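// Clamp a cursor to the populated part of the set: returns the pointer
// unchanged if it still refers to an appended region, NULL otherwise.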
ShenandoahHeapRegion** ShenandoahHeapRegionSet::limit_region(ShenandoahHeapRegion** region) {
  if (region >= _next_free) {
    return NULL;
  } else {
    return region;
  }
}

void ShenandoahHeapRegionSet::print() {
  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
    if (i == _current) {
      tty->print_cr("C->");
    }
    if (i == _next) {
      tty->print_cr("N->");
    }
    (*i)->print();
  }
}

void ShenandoahHeapRegionSet::choose_collection_and_free_sets(ShenandoahHeapRegionSet* col_set, ShenandoahHeapRegionSet* free_set) {
  col_set->choose_collection_set(_regions, length());
  free_set->choose_free_set(_regions, length());
  //  assert(col_set->length() > 0 && free_set->length() > 0, "Better have some regions in the collection and free sets");
}

void ShenandoahHeapRegionSet::choose_collection_and_free_sets_min_garbage(ShenandoahHeapRegionSet* col_set, ShenandoahHeapRegionSet* free_set, size_t min_garbage) {
  col_set->choose_collection_set_min_garbage(_regions, length(), min_garbage);
  free_set->choose_free_set(_regions, length());
  //  assert(col_set->length() > 0 && free_set->length() > 0, "Better have some regions in the collection and free sets");
}

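// Build the collection set from a sorted scratch copy of the candidate
// regions: the garbage-richest regions are considered first. Regions with
// no live data at all are recycled on the spot instead of being evacuated.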
void ShenandoahHeapRegionSet::choose_collection_set(ShenandoahHeapRegion** regions, size_t length) {

  clear();

  assert(length <= _max_regions, "must not blow up array");

  ShenandoahHeapRegion** tmp = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, length, mtGC);

  memcpy(tmp, regions, sizeof(ShenandoahHeapRegion*) * length);

  QuickSort::sort<ShenandoahHeapRegion*>(tmp, length, compareHeapRegionsByGarbage, false);

  ShenandoahHeapRegion** r = tmp;
  ShenandoahHeapRegion** end = tmp + length;

  // We don't want the current allocation region in the collection set because
  // a) it is still being allocated into and b) this is where the write
  // barriers will allocate their copies.

  while (r < end) {
    ShenandoahHeapRegion* region = *r;
    if (region->garbage() > _garbage_threshold && ! region->is_humongous()) {
      //      tty->print("choose region %d with garbage = " SIZE_FORMAT " and live = " SIZE_FORMAT " and _garbage_threshold = " SIZE_FORMAT "\n",
      //                 region->region_number(), region->garbage(), region->getLiveData(), _garbage_threshold);

      assert(! region->is_humongous(), "no humongous regions in collection set");

      if (region->getLiveData() == 0) {
        // We can recycle it right away and put it in the free set.
        ShenandoahHeap::heap()->decrease_used(region->used());
        region->recycle();
      } else {
        append(region);
        region->set_is_in_collection_set(true);
      }
      //    } else {
      //      tty->print("rejected region %d with garbage = " SIZE_FORMAT " and live = " SIZE_FORMAT " and _garbage_threshold = " SIZE_FORMAT "\n",
      //                 region->region_number(), region->garbage(), region->getLiveData(), _garbage_threshold);
    }
    r++;
  }

  FREE_C_HEAP_ARRAY(ShenandoahHeapRegion*, tmp);
}

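// Like choose_collection_set(), but stop adding regions once at least
// min_garbage bytes of reclaimable garbage have been gathered.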
void ShenandoahHeapRegionSet::choose_collection_set_min_garbage(ShenandoahHeapRegion** regions, size_t length, size_t min_garbage) {

  clear();

  assert(length <= _max_regions, "must not blow up array");

  ShenandoahHeapRegion** tmp = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, length, mtGC);

  memcpy(tmp, regions, sizeof(ShenandoahHeapRegion*) * length);

  QuickSort::sort<ShenandoahHeapRegion*>(tmp, length, compareHeapRegionsByGarbage, false);

  ShenandoahHeapRegion** r = tmp;
  ShenandoahHeapRegion** end = tmp + length;

  // We don't want the current allocation region in the collection set because
  // a) it is still being allocated into and b) this is where the write
  // barriers will allocate their copies.

  size_t garbage = 0;
  while (r < end && garbage < min_garbage) {
    ShenandoahHeapRegion* region = *r;
    if (region->garbage() > _garbage_threshold && ! region->is_humongous()) {
      append(region);
      garbage += region->garbage();
      region->set_is_in_collection_set(true);
    }
    r++;
  }

  FREE_C_HEAP_ARRAY(ShenandoahHeapRegion*, tmp);

  /*
  tty->print_cr("chosen region with " SIZE_FORMAT " garbage given " SIZE_FORMAT " min_garbage", garbage, min_garbage);
  */
}

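// The free set is everything left over: regions that are neither in the
// collection set nor part of a humongous allocation.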
void ShenandoahHeapRegionSet::choose_free_set(ShenandoahHeapRegion** regions, size_t length) {

  clear();
  ShenandoahHeapRegion** end = regions + length;

  for (ShenandoahHeapRegion** r = regions; r < end; r++) {
    ShenandoahHeapRegion* region = *r;
    if ((! region->is_in_collection_set())
        && (! region->is_humongous())) {
      append(region);
    }
  }
}

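// Scan for humongous 'start' regions whose object is no longer marked in the
// current marking; such objects, and all their continuation regions, can be
// reclaimed immediately without evacuation.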
void ShenandoahHeapRegionSet::reclaim_humongous_regions() {

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  for (ShenandoahHeapRegion** r = _regions; r < _next_free; r++) {
    // We can immediately reclaim humongous objects/regions that are no longer reachable.
    ShenandoahHeapRegion* region = *r;
    if (region->is_humongous_start()) {
      oop humongous_obj = oop(region->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
      if (! heap->is_marked_current(humongous_obj)) {
        reclaim_humongous_region_at(r);
      }
    }
  }
}

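// Reset every region spanned by the dead humongous object. The object's size
// plus its Brooks pointer word determines how many regions the original
// allocation occupied.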
void ShenandoahHeapRegionSet::reclaim_humongous_region_at(ShenandoahHeapRegion** r) {
  assert((*r)->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop((*r)->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
  size_t size = humongous_obj->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);

  if (ShenandoahTraceHumongous) {
    tty->print_cr("reclaiming " UINT32_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
  }

  assert((*r)->getLiveData() == 0, "liveness must be zero");

  for (ShenandoahHeapRegion** i = r; i < r + required_regions; i++) {
    ShenandoahHeapRegion* region = *i;

    assert(i == r ? region->is_humongous_start() : region->is_humongous_continuation(),
           "expect correct humongous start or continuation");

    if (ShenandoahTraceHumongous) {
      region->print();
    }

    region->reset();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

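// Record the current top of each region as its safe limit, so that concurrent
// iteration does not walk into objects allocated after this point.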
void ShenandoahHeapRegionSet::set_concurrent_iteration_safe_limits() {
  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
    ShenandoahHeapRegion* region = *i;
    region->set_concurrent_iteration_safe_limit(region->top());
  }
}

size_t ShenandoahHeapRegionSet::garbage() {
  size_t garbage = 0;
  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
    ShenandoahHeapRegion* region = *i;
    garbage += region->garbage();
  }
  return garbage;
}

size_t ShenandoahHeapRegionSet::used() {
  size_t used = 0;
  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
    ShenandoahHeapRegion* region = *i;
    used += region->used();
  }
  return used;
}

size_t ShenandoahHeapRegionSet::live_data() {
  size_t live = 0;
  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
    ShenandoahHeapRegion* region = *i;
    live += region->getLiveData();
  }
  return live;
}

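// Accounting only: move num_bytes from the available tally to the used tally.
// The regions themselves are not modified.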
void ShenandoahHeapRegionSet::decrease_available(size_t num_bytes) {
  assert(_available >= num_bytes, "can't use more than available");
  _available -= num_bytes;
  _used += num_bytes;
}

size_t ShenandoahHeapRegionSet::available() const {
#ifdef ASSERT
  // Need to grab the lock here, because the different counters are updated
  // within the lock. Otherwise we sometimes get inconsistent results.
  {
    MutexLockerEx ml(ShenandoahHeap_lock, true);
    assert(ShenandoahHeap::heap()->capacity() - ShenandoahHeap::heap()->used() >= _available,
           err_msg("must not be > heap free, capacity: " SIZE_FORMAT ", used: " SIZE_FORMAT ", available: " SIZE_FORMAT,
                   ShenandoahHeap::heap()->capacity(), ShenandoahHeap::heap()->used(), _available));
  }
#endif
  return _available;
}

size_t ShenandoahHeapRegionSet::used() const {
  assert(ShenandoahHeap::heap()->used() >= _used, "must not be > heap used");
  return _used;
}