/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "runtime/atomic.hpp"

// Even though we don't use GC efficiency in our heuristics as much as
// we used to, we still order regions by it. This causes regions with a
// lot of live objects and large RSets to end up at the end of the
// array. Since we might skip collecting the last few old regions (if,
// after a few mixed GCs, the remaining ones have reclaimable bytes
// under a certain threshold), the hope is that the skipped regions are
// those with both large RSets and a lot of live objects, rather than
// just a lot of live objects, as they would be if we ordered by the
// amount of reclaimable bytes per region.
static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
  if (hr1 == NULL) {
    if (hr2 == NULL) {
      return 0;
    } else {
      return 1;
    }
  } else if (hr2 == NULL) {
    return -1;
  }

  double gc_eff1 = hr1->gc_efficiency();
  double gc_eff2 = hr2->gc_efficiency();
  if (gc_eff1 > gc_eff2) {
    return -1;
  } else if (gc_eff1 < gc_eff2) {
    return 1;
  } else {
    return 0;
  }
}

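// Adapter for GrowableArray<HeapRegion*>::sort(), which passes the
// comparator pointers to the array elements.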
static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) {
  return order_regions(*hr1p, *hr2p);
}

CollectionSetChooser::CollectionSetChooser() :
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23).  You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects, STACK_OR_EMBEDDED, with C_HEAP. This
  // satisfies the assert in GenericGrowableArray(), which checks that
  // a growable array object must be on the C heap if its elements are.
  //
  // Note: the containing object is allocated on the C heap since it is
  // a CHeapObj.
  //
  _regions((ResourceObj::set_allocation_type((address) &_regions,
                                             ResourceObj::C_HEAP),
                  100), true /* C_Heap */),
    _front(0), _end(0), _first_par_unreserved_idx(0),
    _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
  _region_live_threshold_bytes =
    HeapRegion::GrainBytes * (size_t) G1MixedGCLiveThresholdPercent / 100;
}

#ifndef PRODUCT
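// Check the chooser's invariants: all slots before _front are NULL, all
// entries in [_front, _end) are non-NULL, non-young, non-pinned regions
// sorted by non-increasing GC efficiency, and their reclaimable bytes
// add up to _remaining_reclaimable_bytes.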
void CollectionSetChooser::verify() {
  guarantee(_end <= regions_length(), "_end: %u regions length: %u", _end, regions_length());
  guarantee(_front <= _end, "_front: %u _end: %u", _front, _end);
  uint index = 0;
  size_t sum_of_reclaimable_bytes = 0;
  while (index < _front) {
    guarantee(regions_at(index) == NULL,
              "all entries before _front should be NULL");
    index += 1;
  }
  HeapRegion *prev = NULL;
  while (index < _end) {
    HeapRegion *curr = regions_at(index++);
    guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
    guarantee(!curr->is_young(), "should not be young!");
    guarantee(!curr->is_pinned(),
              "Pinned region should not be in collection set (index %u)", curr->hrm_index());
    if (prev != NULL) {
      guarantee(order_regions(prev, curr) != 1,
                "GC eff prev: %1.4f GC eff curr: %1.4f",
                prev->gc_efficiency(), curr->gc_efficiency());
    }
    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
    prev = curr;
  }
  guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
            "reclaimable bytes inconsistent, "
            "remaining: " SIZE_FORMAT " sum: " SIZE_FORMAT,
            _remaining_reclaimable_bytes, sum_of_reclaimable_bytes);
}
#endif // !PRODUCT

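// Sort the candidate array by decreasing GC efficiency, optionally log
// the post-sorting liveness of each region, and verify the result.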
void CollectionSetChooser::sort_regions() {
  // First trim any unused portion of the top in the parallel case.
  if (_first_par_unreserved_idx > 0) {
    assert(_first_par_unreserved_idx <= regions_length(),
           "or we didn't reserve enough space");
    regions_trunc_to(_first_par_unreserved_idx);
  }
  _regions.sort(order_regions);
  assert(_end <= regions_length(), "Requirement");
#ifdef ASSERT
  for (uint i = 0; i < _end; i++) {
    assert(regions_at(i) != NULL, "Should be true by sorting!");
  }
#endif // ASSERT
  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Sorting");
    for (uint i = 0; i < _end; ++i) {
      HeapRegion* r = regions_at(i);
      cl.doHeapRegion(r);
    }
  }
  verify();
}

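// Append a region to the candidate array, update the running
// reclaimable-bytes total, and cache the region's GC efficiency for
// sorting.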
void CollectionSetChooser::add_region(HeapRegion* hr) {
  assert(!hr->is_pinned(),
         "Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index());
  assert(!hr->is_young(), "should not be young!");
  _regions.append(hr);
  _end++;
  _remaining_reclaimable_bytes += hr->reclaimable_bytes();
  hr->calc_gc_efficiency();
}

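// Put a region back at the front of the array after it has been
// removed, restoring its reclaimable bytes to the running total.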
void CollectionSetChooser::push(HeapRegion* hr) {
  assert(hr != NULL, "Can't put back a NULL region");
  assert(_front >= 1, "Too many regions have been put back");
  _front--;
  regions_at_put(_front, hr);
  _remaining_reclaimable_bytes += hr->reclaimable_bytes();
}

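// Grow the backing array so that workers can claim fixed-size chunks of
// slots without further synchronization. The array must cover n_regions
// rounded up to a multiple of chunk_size, plus up to chunk_size slots of
// waste per thread for partially filled chunks. For example, with
// n_threads = 4, n_regions = 100 and chunk_size = 16, aligned_n_regions
// is 112 and max_waste is 64, so the array is grown to length 176.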
void CollectionSetChooser::prepare_for_par_region_addition(uint n_threads,
                                                           uint n_regions,
                                                           uint chunk_size) {
  _first_par_unreserved_idx = 0;
  uint max_waste = n_threads * chunk_size;
  // Round n_regions up to a multiple of chunk_size.
  uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
  assert(aligned_n_regions % chunk_size == 0, "should be aligned");
  regions_at_put_grow(aligned_n_regions + max_waste - 1, NULL);
}

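// Atomically claim the next chunk_size slots. Atomic::add returns the
// updated value of _first_par_unreserved_idx, so the claimed chunk
// starts at res - chunk_size. For example, if the index was 32 and
// chunk_size is 16, res is 48 and the caller owns slots [32, 48).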
uint CollectionSetChooser::claim_array_chunk(uint chunk_size) {
  uint res = (uint) Atomic::add((jint) chunk_size,
                                (volatile jint*) &_first_par_unreserved_idx);
  assert(regions_length() > res + chunk_size - 1,
         "Should already have been expanded");
  return res - chunk_size;
}

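// Install a region into a previously claimed, still-NULL slot and cache
// its GC efficiency.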
void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
  assert(regions_at(index) == NULL, "precondition");
  assert(!hr->is_young(), "should not be young!");
  regions_at_put(index, hr);
  hr->calc_gc_efficiency();
}

void CollectionSetChooser::update_totals(uint region_num,
                                         size_t reclaimable_bytes) {
  // Only take the lock if we actually need to update the totals.
  if (region_num > 0) {
    assert(reclaimable_bytes > 0, "invariant");
    // We could have just used atomics instead of taking the
    // lock. However, we currently don't have an atomic add for size_t.
    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    _end += region_num;
    _remaining_reclaimable_bytes += reclaimable_bytes;
  } else {
    assert(reclaimable_bytes == 0, "invariant");
  }
}

void CollectionSetChooser::clear() {
  _regions.clear();
  _front = 0;
  _end = 0;
  _remaining_reclaimable_bytes = 0;
}

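// Parallel task that walks all heap regions and feeds every marked,
// suitable candidate to the chooser through a per-worker
// CSetChooserParUpdater.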
class ParKnownGarbageTask: public G1ParallelizeByRegionsTask {
  class ParKnownGarbageHRClosure: public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    CSetChooserParUpdater _cset_updater;

  public:
    ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                             uint chunk_size) :
      _g1h(G1CollectedHeap::heap()),
      _cset_updater(hrSorted, true /* parallel */, chunk_size) { }

    bool doHeapRegion(HeapRegion* r) {
      // Do we have any marking information for this region?
      if (r->is_marked()) {
        // We will skip any region that's currently used as an old GC
        // alloc region (we should not consider those for collection
        // before we fill them up).
        if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
          _cset_updater.add_region(r);
        }
      }
      return false;
    }
  };

  CollectionSetChooser* _hrSorted;
  uint _chunk_size;

public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
      G1ParallelizeByRegionsTask("ParKnownGarbageTask", n_workers),
      _hrSorted(hrSorted), _chunk_size(chunk_size) {}

  void work(uint worker_id) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
    all_heap_regions_work(&parKnownGarbageCl, worker_id);
  }
};

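// Compute the chunk size workers use when claiming slots. The
// overpartition factor is meant to create more chunks than workers for
// better load balancing, with min_chunk_size as a lower bound.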
uint CollectionSetChooser::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
  assert(n_workers > 0, "Active gc workers should be greater than 0");
  const uint overpartition_factor = 4;
  const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
  return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}

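// Rebuild the candidate array from scratch: clear the old state,
// reserve space for parallel insertion, let the workers fill the array,
// then sort it by GC efficiency.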
void CollectionSetChooser::rebuild(WorkGang* workers, uint n_regions) {
  clear();

  uint n_workers = workers->active_workers();

  uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
  prepare_for_par_region_addition(n_workers, n_regions, chunk_size);

  ParKnownGarbageTask par_known_garbage_task(this, chunk_size, n_workers);
  workers->run_task(&par_known_garbage_task);

  sort_regions();
}