/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_heapRegion.cpp.incl"

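// These five values are computed exactly once, in
// HeapRegion::setup_heap_region_size() below; the guarantees there
// ensure they are never set twice.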
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
int HeapRegion::GrainBytes        = 0;
int HeapRegion::GrainWords        = 0;
int HeapRegion::CardsPerRegion    = 0;

HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1)
{}

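// The closure caches the region's bounds so that, when applied to a
// reference, it can cheaply decide whether the target lies outside
// [bottom, end) and only then hand the reference to the wrapped closure.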
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()),
  _oc(oc), _out_of_region(0)
{}

class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  bool _use_prev_marking;
public:
  // use_prev_marking == true  -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    klassOop k = obj->klass();
    const char* class_name = instanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T> void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) ||
          _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

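          // The reference is acceptable if the source region is young
          // (young regions are scanned in full anyway), if the target
          // region's remembered set records the field, or if the update
          // log buffers were not flushed before verification and the
          // relevant card(s) are still dirty: for object arrays only the
          // field's card counts, otherwise the object-head card does too.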
          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT
                          " of obj "PTR_FORMAT
                          ", in region %d ["PTR_FORMAT
                          ", "PTR_FORMAT"),",
                          p, (void*) _containing_obj,
                          from->hrs_index(),
                          from->bottom(),
                          from->end());
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT
                          " in region %d ["PTR_FORMAT
                          ", "PTR_FORMAT").",
                          (void*) obj, to->hrs_index(),
                          to->bottom(), to->end());
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                          cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

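// Walk the objects in [cur, top) in address order, applying "cl" to each
// live one, except for the last object, whose start address is returned
// to the caller: it may extend beyond "top" and so may need the
// MemRegion-bounded form of oop_iterate.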
template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}

void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  G1CollectedHeap* g1h = _g1;

  int oop_size;

  OopClosure* cl2 = cl;
  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
  switch (_fk) {
  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;
    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE  (      1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE  ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER          2048

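// For example (with G1HeapRegionSize left at its default): -Xms1g gives
// 1g / 2048 = 512k, which the MAX2 below raises to the 1M minimum;
// -Xms8g gives 8g / 2048 = 4M, already a power of 2 within the limits.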
void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
  // region_size in bytes
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // We base the automatic calculation on the min heap size. This
    // can be problematic if the spread between min and max is quite
    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
    // the max size, the region size might be way too large for the
    // min size. Either way, some users might have to set the region
    // size manually for some -Xms / -Xmx combos.

    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (int) region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

DirtyCardToOopClosure*
HeapRegion::new_dcto_closure(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapRegionDCTOC::FilterKind fk) {
  return new HeapRegionDCTOC(G1CollectedHeap::heap(),
                             this, cl, precision, fk);
}

void HeapRegion::hr_clear(bool par, bool clear_space) {
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
  _in_collection_set = false;
  _is_gc_alloc_region = false;

  // Age stuff (if parallel, this will be done separately, since it needs
  // to be sequential).
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);

  // In case it had been the start of a humongous sequence, reset its end.
  set_end(_orig_end);

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();
  set_sort_index(-1);

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

// <PREDICTION>
void HeapRegion::calc_gc_efficiency() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  _gc_efficiency = (double) garbage_bytes() /
                            g1h->predict_region_elapsed_time_ms(this, false);
}
// </PREDICTION>

void HeapRegion::set_startsHumongous() {
  _humongous_type = StartsHumongous;
  _humongous_start_region = this;
  assert(end() == _orig_end, "Should be normal before alloc.");
}

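// Try to claim the region for claimValue with a single compare-and-swap.
// Returns true only if this thread's CAS installed claimValue; if the
// region already carries claimValue, or another thread wins the race,
// the claim fails.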
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}

HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount.  Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high".  This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}

void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
  assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
  _next_in_special_set = r;
}

void HeapRegion::set_on_unclean_list(bool b) {
  _is_on_unclean_list = b;
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
  hr_clear(false/*par*/, clear_space);
}
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER


HeapRegion::
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
           MemRegion mr, bool is_zeroed)
  : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _next_fk(HeapRegionDCTOC::NoFilterKind),
    _hrs_index(-1),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false), _is_gc_alloc_region(false),
    _is_on_free_list(false), _is_on_unclean_list(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL),
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _zfs(NotZeroFilled),
    _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
  // In case the region is allocated during a pause, note the top.
  // We haven't done any counting on a brand new region.
  _top_at_conc_mark_count = bottom();
}

class NextCompactionHeapRegionClosure: public HeapRegionClosure {
  const HeapRegion* _target;
  bool _target_seen;
  HeapRegion* _last;
  CompactibleSpace* _res;
public:
  NextCompactionHeapRegionClosure(const HeapRegion* target) :
    _target(target), _target_seen(false), _res(NULL) {}
  bool doHeapRegion(HeapRegion* cur) {
    if (_target_seen) {
      if (!cur->isHumongous()) {
        _res = cur;
        return true;
      }
    } else if (cur == _target) {
      _target_seen = true;
    }
    return false;
  }
  CompactibleSpace* result() { return _res; }
};

CompactibleSpace* HeapRegion::next_compaction_space() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // cast away const-ness
  HeapRegion* r = (HeapRegion*) this;
  NextCompactionHeapRegionClosure blk(r);
  g1h->heap_region_iterate_from(r, &blk);
  return blk.result();
}

void HeapRegion::set_continuesHumongous(HeapRegion* start) {
  // The order is important here.
  start->add_continuingHumongousRegion(this);
  _humongous_type = ContinuesHumongous;
  _humongous_start_region = start;
}

void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
  // Must join the blocks of the current H region seq with the block of the
  // added region.
  offsets()->join_blocks(bottom(), cont->bottom());
  arrayOop obj = (arrayOop)(bottom());
  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
  set_end(cont->end());
  set_top(cont->end());
}

void HeapRegion::save_marks() {
  set_saved_mark();
}

void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) {
  HeapWord* p = mr.start();
  HeapWord* e = mr.end();
  oop obj;
  while (p < e) {
    obj = oop(p);
    p += obj->oop_iterate(cl);
  }
  assert(p == e, "bad memregion: doesn't end on obj boundary");
}

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl);              \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)


void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}

#ifdef DEBUG
HeapWord* HeapRegion::allocate(size_t size) {
  jint state = zero_fill_state();
  assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
         zero_fill_is_allocated(),
         "When ZF is on, only alloc in ZF'd regions");
  return G1OffsetTableContigSpace::allocate(size);
}
#endif

void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
  assert(ZF_mon->owned_by_self() ||
         Universe::heap()->is_gc_active(),
         "Must hold the lock or be a full GC to modify.");
#ifdef ASSERT
  if (top() != bottom() && zfs != Allocated) {
    ResourceMark rm;
    stringStream region_str;
    print_on(&region_str);
    assert(top() == bottom() || zfs == Allocated,
           err_msg("Region must be empty, or we must be setting it to allocated. "
                   "_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
  }
#endif
  _zfs = zfs;
}

void HeapRegion::set_zero_fill_complete() {
  set_zero_fill_state_work(ZeroFilled);
  if (ZF_mon->owned_by_self()) {
    ZF_mon->notify_all();
  }
}


void HeapRegion::ensure_zero_filled() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  ensure_zero_filled_locked();
}

void HeapRegion::ensure_zero_filled_locked() {
  assert(ZF_mon->owned_by_self(), "Precondition");
  bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
  assert(should_ignore_zf || Heap_lock->is_locked(),
         "Either we're in a GC or we're allocating a region.");
  switch (zero_fill_state()) {
  case HeapRegion::NotZeroFilled:
    set_zero_fill_in_progress(Thread::current());
    {
      ZF_mon->unlock();
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      ZF_mon->lock_without_safepoint_check();
    }
    // A trap.
    guarantee(zero_fill_state() == HeapRegion::ZeroFilling
              && zero_filler() == Thread::current(),
              "AHA!  Tell Dave D if you see this...");
    set_zero_fill_complete();
    // gclog_or_tty->print_cr("Did sync ZF.");
    ConcurrentZFThread::note_sync_zfs();
    break;
  case HeapRegion::ZeroFilling:
    if (should_ignore_zf) {
      // We can "break" the lock and take over the work.
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      set_zero_fill_complete();
      ConcurrentZFThread::note_sync_zfs();
      break;
    } else {
      ConcurrentZFThread::wait_for_ZF_completed(this);
    }
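    // Deliberate fall-through: once the wait returns, the region has
    // been zero filled, so there is nothing left to do.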
  case HeapRegion::ZeroFilled:
    // Nothing to do.
    break;
  case HeapRegion::Allocated:
    guarantee(false, "Should not call on allocated regions.");
  }
  assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
}

HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}

HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "saved_mark" of the region.
  if (G1CollectedHeap::heap()->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  assert(cur <= mr.start(), "Postcondition");

  while (cur <= mr.start()) {
    if (oop(cur)->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    int sz = oop(cur)->size();
    if (cur + sz > mr.start()) break;
    // Otherwise, go on.
    cur = cur + sz;
  }
  oop obj;
  obj = oop(cur);
  // If we finish this loop...
  assert(cur <= mr.start()
         && obj->klass_or_null() != NULL
         && cur + obj->size() > mr.start(),
         "Loop postcondition");
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  HeapWord* next;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise:
    next = (cur + obj->size());
    if (!g1h->is_obj_dead(obj)) {
      if (next < mr.end()) {
        obj->oop_iterate(cl);
      } else {
        // this obj spans the boundary.  If it's an array, stop at the
        // boundary.
        if (obj->is_objArray()) {
          obj->oop_iterate(cl, mr);
        } else {
          obj->oop_iterate(cl);
        }
      }
    }
    cur = next;
  }
  return NULL;
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print("   ");
  }
  if (in_collection_set())
    st->print(" CS");
  else if (is_gc_alloc_region())
    st->print(" A ");
  else
    st->print("   ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print("   ");
  if (is_empty())
    st->print(" F");
  else
    st->print("  ");
  st->print(" %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

void HeapRegion::verify(bool allow_dirty) const {
  bool dummy = false;
  verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(bool allow_dirty,
                        bool use_prev_marking,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;
  VerifyLiveClosure vl_cl(g1, use_prev_marking);
  bool is_humongous = isHumongous();
  size_t object_num = 0;
  while (p < top()) {
    size_t size = oop(p)->size();
    if (is_humongous != g1->isHumongous(size)) {
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(size) ? "" : "non-",
                             size, is_humongous ? "" : "non-");
      *failures = true;
    }
    object_num += 1;
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      HeapWord* res = block_start_const(p + (size/2));
      if (p != res) {
        gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and "
                               SIZE_FORMAT" returned "PTR_FORMAT,
                               p, size, res);
        *failures = true;
        return;
      }
      blocks = 0;
    } else {
      blocks++;
    }
    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop obj = oop(p);
      if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
        if (obj->is_oop()) {
          klassOop klass = obj->klass();
          if (!klass->is_perm()) {
            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                   "not in perm", klass, obj);
            *failures = true;
            return;
          } else if (!klass->is_klass()) {
            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                   "not a klass", klass, obj);
            *failures = true;
            return;
          } else {
            vl_cl.set_containing_obj(obj);
            obj->oop_iterate(&vl_cl);
            if (vl_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vl_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          }
        } else {
          gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
          *failures = true;
          return;
        }
      }
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  HeapWord* rend = end();
  HeapWord* rtop = top();
  if (rtop < rend) {
    HeapWord* res = block_start_const(rtop + (rend - rtop) / 2);
    if (res != rtop) {
      gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and "
                             PTR_FORMAT" returned "PTR_FORMAT,
                             rtop, rend, res);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }
}

// G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
// away eventually.

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
  if (clear_space) clear(mangle_space);
}

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    return top();
  else
    return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // The following fence is to force a flush of the writes above, but
    // is strictly not needed because when an allocating worker thread
    // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
    // when the lock is released, the write will be flushed.
    // OrderAccess::fence();
  }
}

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr, bool is_zeroed) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
}

size_t RegionList::length() {
  size_t len = 0;
  HeapRegion* cur = hd();
  DEBUG_ONLY(HeapRegion* last = NULL);
  while (cur != NULL) {
    len++;
    DEBUG_ONLY(last = cur);
    cur = get_next(cur);
  }
  assert(last == tl(), "Invariant");
  return len;
}

void RegionList::insert_before_head(HeapRegion* r) {
  assert(well_formed(), "Inv");
  set_next(r, hd());
  _hd = r;
  _sz++;
  if (tl() == NULL) _tl = r;
  assert(well_formed(), "Inv");
}

void RegionList::prepend_list(RegionList* new_list) {
  assert(well_formed(), "Precondition");
  assert(new_list->well_formed(), "Precondition");
  HeapRegion* new_tl = new_list->tl();
  if (new_tl != NULL) {
    set_next(new_tl, hd());
    _hd = new_list->hd();
    _sz += new_list->sz();
    if (tl() == NULL) _tl = new_list->tl();
  } else {
    assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
  }
  assert(well_formed(), "Inv");
}

void RegionList::delete_after(HeapRegion* r) {
  assert(well_formed(), "Precondition");
  assert(r != NULL, "Precondition");
  HeapRegion* next = get_next(r);
  HeapRegion* next_tl = get_next(next);
  set_next(r, next_tl);
  dec_sz();
  if (next == tl()) {
    assert(next_tl == NULL, "Inv");
    _tl = r;
  }
  assert(well_formed(), "Inv");
}

HeapRegion* RegionList::pop() {
  assert(well_formed(), "Inv");
  HeapRegion* res = hd();
  if (res != NULL) {
    _hd = get_next(res);
    _sz--;
    set_next(res, NULL);
    if (sz() == 0) _tl = NULL;
  }
  assert(well_formed(), "Inv");
  return res;
}