/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1ErgoVerbose.hpp"
#include "gc/shared/space.inline.hpp"
#include "runtime/atomic.inline.hpp"

// Even though we don't use the GC efficiency in our heuristics as
// much as we used to, we still order according to GC efficiency. This
// will cause regions with a lot of live objects and large RSets to
// end up at the end of the array. Given that we might skip collecting
// the last few old regions if, after a few mixed GCs, their remaining
// reclaimable bytes fall under a certain threshold, the hope is that
// the ones we'll skip are ones with both large RSets and a lot of
// live objects, not just the ones with a lot of live objects, as
// would happen if we ordered according to reclaimable bytes per region.
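//
// The comparator returns a negative value when hr1 should come before
// hr2, i.e. when hr1 has the higher GC efficiency. NULL entries compare
// greater than any real region, so sorting pushes them to the end of
// the array; this is how slots that were reserved for parallel region
// addition but never filled get moved out of the way.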
static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
  if (hr1 == NULL) {
    if (hr2 == NULL) {
      return 0;
    } else {
      return 1;
    }
  } else if (hr2 == NULL) {
    return -1;
  }

  double gc_eff1 = hr1->gc_efficiency();
  double gc_eff2 = hr2->gc_efficiency();
  if (gc_eff1 > gc_eff2) {
    return -1;
  } else if (gc_eff1 < gc_eff2) {
    return 1;
  } else {
    return 0;
  }
}

static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) {
  return order_regions(*hr1p, *hr2p);
}

CollectionSetChooser::CollectionSetChooser() :
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23).  You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It
  // allows us to pass the assert in GenericGrowableArray() which checks
  // that a growable array object must be on the C heap if its elements are.
  //
  // Note: the containing object is allocated on the C heap since it is a CHeapObj.
  //
  _regions((ResourceObj::set_allocation_type((address) &_regions,
                                             ResourceObj::C_HEAP),
                  100), true /* C_Heap */),
    _curr_index(0), _length(0), _first_par_unreserved_idx(0),
    _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
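  // Regions with more live data than this threshold are deemed too
  // expensive to evacuate and are excluded from the mixed GC candidates.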
  _region_live_threshold_bytes =
    HeapRegion::GrainBytes * (size_t) G1MixedGCLiveThresholdPercent / 100;
}

#ifndef PRODUCT
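// Sanity checks for debug builds: entries before _curr_index must already
// have been consumed (and NULLed out), the remaining candidates must be
// non-NULL, non-young, non-pinned and sorted by non-increasing GC
// efficiency, and their reclaimable bytes must add up to
// _remaining_reclaimable_bytes.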
void CollectionSetChooser::verify() {
  guarantee(_length <= regions_length(),
            err_msg("_length: %u regions length: %u", _length, regions_length()));
  guarantee(_curr_index <= _length,
            err_msg("_curr_index: %u _length: %u", _curr_index, _length));
  uint index = 0;
  size_t sum_of_reclaimable_bytes = 0;
  while (index < _curr_index) {
    guarantee(regions_at(index) == NULL,
              "all entries before _curr_index should be NULL");
    index += 1;
  }
  HeapRegion *prev = NULL;
  while (index < _length) {
    HeapRegion *curr = regions_at(index++);
    guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
    guarantee(!curr->is_young(), "should not be young!");
    guarantee(!curr->is_pinned(),
              err_msg("Pinned region should not be in collection set (index %u)", curr->hrm_index()));
    if (prev != NULL) {
      guarantee(order_regions(prev, curr) != 1,
                err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
                        prev->gc_efficiency(), curr->gc_efficiency()));
    }
    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
    prev = curr;
  }
  guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
            err_msg("reclaimable bytes inconsistent, "
                    "remaining: " SIZE_FORMAT " sum: " SIZE_FORMAT,
                    _remaining_reclaimable_bytes, sum_of_reclaimable_bytes));
}
#endif // !PRODUCT

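// Sort the candidate array so that the regions we most want to collect
// (highest GC efficiency) come first; see order_regions() above.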
void CollectionSetChooser::sort_regions() {
  // First trim any unused portion of the top in the parallel case.
  if (_first_par_unreserved_idx > 0) {
    assert(_first_par_unreserved_idx <= regions_length(),
           "Or we didn't reserve enough length");
    regions_trunc_to(_first_par_unreserved_idx);
  }
  _regions.sort(order_regions);
  assert(_length <= regions_length(), "Requirement");
#ifdef ASSERT
  for (uint i = 0; i < _length; i++) {
    assert(regions_at(i) != NULL, "Should be true by sorting!");
  }
#endif // ASSERT
  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
    for (uint i = 0; i < _length; ++i) {
      HeapRegion* r = regions_at(i);
      cl.doHeapRegion(r);
    }
  }
  verify();
}

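// Append a single candidate region, updating the cached length and
// reclaimable-bytes total and refreshing the region's GC efficiency so
// that the subsequent sort sees an up-to-date value.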
void CollectionSetChooser::add_region(HeapRegion* hr) {
  assert(!hr->is_pinned(),
         err_msg("Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index()));
  assert(!hr->is_young(), "should not be young!");
  _regions.append(hr);
  _length++;
  _remaining_reclaimable_bytes += hr->reclaimable_bytes();
  hr->calc_gc_efficiency();
}

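// Pre-size the candidate array for parallel addition: n_threads workers
// each claim chunks of chunk_size slots, and each may leave its final
// chunk only partially filled, so in the worst case n_threads * chunk_size
// extra slots are needed on top of the (chunk-aligned) region count.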
void CollectionSetChooser::prepare_for_par_region_addition(uint n_threads,
                                                           uint n_regions,
                                                           uint chunk_size) {
  _first_par_unreserved_idx = 0;
  uint max_waste = n_threads * chunk_size;
  // Round the number of regions up so that it is aligned with respect to chunk_size.
  uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
  assert(aligned_n_regions % chunk_size == 0, "should be aligned");
  regions_at_put_grow(aligned_n_regions + max_waste - 1, NULL);
}

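// Atomically claim the next chunk of chunk_size slots. Atomic::add returns
// the value after the addition, so subtracting chunk_size gives the index
// of the first slot in the chunk this thread has just claimed.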
uint CollectionSetChooser::claim_array_chunk(uint chunk_size) {
  uint res = (uint) Atomic::add((jint) chunk_size,
                                (volatile jint*) &_first_par_unreserved_idx);
  assert(regions_length() > res + chunk_size - 1,
         "Should already have been expanded");
  return res - chunk_size;
}

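// Install a region into a slot previously claimed via claim_array_chunk().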
void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
  assert(regions_at(index) == NULL, "precondition");
  assert(!hr->is_young(), "should not be young!");
  regions_at_put(index, hr);
  hr->calc_gc_efficiency();
}

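// Merge a worker's counts into the shared totals after parallel region
// addition.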
void CollectionSetChooser::update_totals(uint region_num,
                                         size_t reclaimable_bytes) {
  // Only take the lock if we actually need to update the totals.
  if (region_num > 0) {
    assert(reclaimable_bytes > 0, "invariant");
    // We could have just used atomics instead of taking the
    // lock. However, we currently don't have an atomic add for size_t.
    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    _length += region_num;
    _remaining_reclaimable_bytes += reclaimable_bytes;
  } else {
    assert(reclaimable_bytes == 0, "invariant");
  }
}

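// Drop all candidates and reset the bookkeeping so the chooser can be
// repopulated from scratch.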
void CollectionSetChooser::clear() {
  _regions.clear();
  _curr_index = 0;
  _length = 0;
  _remaining_reclaimable_bytes = 0;
}