/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "memory/allocation.hpp"

// Local to this file.

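// Comparison function used to keep _regions sorted by address: a region
// that ends at or before another region's bottom sorts first.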
static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
  else if (*hr1p == *hr2p) return 0;
  else {
    assert(false, "We should never compare distinct overlapping regions.");
  }
  return 0;
}

HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
  _alloc_search_start(0),
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23).  You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects, STACK_OR_EMBEDDED, with C_HEAP. This
  // lets us pass the assert in GenericGrowableArray(), which checks
  // that a growable array object must be on the C heap if its elements are.
  //
  // Note: the containing object is allocated on the C heap since it is a CHeapObj.
  //
  _regions((ResourceObj::set_allocation_type((address)&_regions,
                                             ResourceObj::C_HEAP),
            (int)max_size),
           true),
  _next_rr_candidate(0),
  _seq_bottom(NULL)
{}

// Private methods.

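// Starting at index "ind", looks for a run of contiguous empty regions
// whose combined capacity can hold a humongous object of "word_size"
// words.  On success, each region in the run is marked as part of a
// humongous series and pre-filled, and the bottom of the first region
// (the address at which the object starts) is returned; otherwise NULL
// is returned.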
HeapWord*
HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
  assert(G1CollectedHeap::isHumongous(word_size),
         "Allocation size should be humongous");
  int cur = ind;
  int first = cur;
  size_t sumSizes = 0;
  while (cur < _regions.length() && sumSizes < word_size) {
    // Loop invariant:
    //  For all i in [first, cur):
    //       _regions.at(i)->is_empty()
    //    && _regions.at(i) is contiguous with its predecessor, if any
    //  && sumSizes is the sum of the sizes of the regions in the interval
    //       [first, cur)
    HeapRegion* curhr = _regions.at(cur);
    if (curhr->is_empty()
        && (first == cur
            || (_regions.at(cur-1)->end() ==
                curhr->bottom()))) {
      sumSizes += curhr->capacity() / HeapWordSize;
    } else {
      first = cur + 1;
      sumSizes = 0;
    }
    cur++;
  }
  if (sumSizes >= word_size) {
    _alloc_search_start = cur;
    // Mark the allocated regions as allocated.
    bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
    HeapRegion* first_hr = _regions.at(first);
    for (int i = first; i < cur; i++) {
      HeapRegion* hr = _regions.at(i);
      if (zf)
        hr->ensure_zero_filled();
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        hr->set_zero_fill_allocated();
      }
      size_t sz = hr->capacity() / HeapWordSize;
      HeapWord* tmp = hr->allocate(sz);
      assert(tmp != NULL, "Humongous allocation failure");
      MemRegion mr = MemRegion(tmp, sz);
      CollectedHeap::fill_with_object(mr);
      hr->declare_filled_region_to_BOT(mr);
      if (i == first) {
        first_hr->set_startsHumongous();
      } else {
        assert(i > first, "sanity");
        hr->set_continuesHumongous(first_hr);
      }
    }
    HeapWord* first_hr_bot = first_hr->bottom();
    HeapWord* obj_end = first_hr_bot + word_size;
    first_hr->set_top(obj_end);
    return first_hr_bot;
  } else {
    // If we started from the beginning, we want to know why we can't alloc.
    return NULL;
  }
}

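// Prints each maximal run of contiguous empty regions as an
// "index:length" pair, followed by the total number of empty regions.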
void HeapRegionSeq::print_empty_runs() {
  int empty_run = 0;
  int n_empty = 0;
  int empty_run_start = 0;
  for (int i = 0; i < _regions.length(); i++) {
    HeapRegion* r = _regions.at(i);
    if (r->continuesHumongous()) continue;
    if (r->is_empty()) {
      assert(!r->isHumongous(), "H regions should not be empty.");
      if (empty_run == 0) empty_run_start = i;
      empty_run++;
      n_empty++;
    } else {
      if (empty_run > 0) {
        gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
        empty_run = 0;
      }
    }
  }
  if (empty_run > 0) {
    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
  }
  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
}

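// Returns the index of "hr" in the sequence via its cached hrs_index,
// or -1 if that index has not been set.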
int HeapRegionSeq::find(HeapRegion* hr) {
  // FIXME: optimized for adjacent regions of fixed size.
  int ind = hr->hrs_index();
  if (ind != -1) {
    assert(_regions.at(ind) == hr, "Mismatch");
  }
  return ind;
}


// Public methods.

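// Adds "hr" to the sequence.  In the common case hr lies beyond the
// current top and is simply appended; otherwise the sequence is re-sorted
// by address and every region's hrs_index is renumbered.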
void HeapRegionSeq::insert(HeapRegion* hr) {
  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
  if (_regions.length() == 0
      || _regions.top()->end() <= hr->bottom()) {
    hr->set_hrs_index(_regions.length());
    _regions.append(hr);
  } else {
    _regions.append(hr);
    _regions.sort(orderRegions);
    for (int i = 0; i < _regions.length(); i++) {
      _regions.at(i)->set_hrs_index(i);
    }
  }
  char* bot = (char*)_regions.at(0)->bottom();
  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
}

size_t HeapRegionSeq::length() {
  return _regions.length();
}

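// Returns the number of contiguous empty regions at the end of the sequence.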
size_t HeapRegionSeq::free_suffix() {
  size_t res = 0;
  int first = _regions.length() - 1;
  int cur = first;
  while (cur >= 0 &&
         (_regions.at(cur)->is_empty()
          && (first == cur
              || (_regions.at(cur+1)->bottom() ==
                  _regions.at(cur)->end())))) {
    res++;
    cur--;
  }
  return res;
}

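// Attempts a humongous allocation, first from the cached search start
// index and, failing that, from the beginning of the sequence.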
HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
  int cur = _alloc_search_start;
  // Make sure "cur" is a valid index.
  assert(cur >= 0, "Invariant.");
  HeapWord* res = alloc_obj_from_region_index(cur, word_size);
  if (res == NULL)
    res = alloc_obj_from_region_index(0, word_size);
  return res;
}

void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);
}

// The first argument r is the heap region at which iteration begins.
// This operation runs fastest when r is NULL, or the heap region for
// which a HeapRegionClosure most recently returned true, or the
// heap region immediately to its right in the sequence.  In all
// other cases a linear search is required to find the index of r.

void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {

  // :::: FIXME ::::
  // Static cache value is bad, especially when we start doing parallel
  // remembered set update. For now just don't cache anything (the
  // code in the def'd out blocks).

#if 0
  static int cached_j = 0;
#endif
  int len = _regions.length();
  int j = 0;
  // Find the index of r.
  if (r != NULL) {
#if 0
    assert(cached_j >= 0, "Invariant.");
    if ((cached_j < len) && (r == _regions.at(cached_j))) {
      j = cached_j;
    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
      j = cached_j + 1;
    } else {
#endif
      // With the cache disabled, fall back to a linear search for r's index.
      j = find(r);
      if (j < 0) {
        j = 0;
      }
#if 0
    }
#endif
  }
  int i;
  for (i = j; i < len; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < j; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
}

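// Applies "blk" to the regions starting at index "idx", wrapping around to
// the beginning of the sequence, and stops early if the closure returns true.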
void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
  int len = _regions.length();
  int i;
  for (i = idx; i < len; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < idx; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
}

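// Removes empty regions from the end of the sequence until "shrink_bytes"
// bytes have been reclaimed or a non-empty or humongous region is reached.
// The number of regions removed is accumulated in "num_regions_deleted",
// and the MemRegion they spanned is returned.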
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    cur->reset_zero_fill();
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region.  We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}


class PrintHeapRegionClosure : public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print(PTR_FORMAT ":", r);
    r->print();
    return false;
  }
};

void HeapRegionSeq::print() {
  PrintHeapRegionClosure cl;
  iterate(&cl);
}