1 /*
   2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/symbolTable.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "gc_implementation/g1/concurrentMark.inline.hpp"
  29 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  30 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  31 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  32 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  33 #include "gc_implementation/g1/g1Log.hpp"
  34 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  35 #include "gc_implementation/g1/g1RemSet.hpp"
  36 #include "gc_implementation/g1/heapRegion.inline.hpp"
  37 #include "gc_implementation/g1/heapRegionManager.inline.hpp"
  38 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  39 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  40 #include "gc_implementation/shared/vmGCOperations.hpp"
  41 #include "gc_implementation/shared/gcTimer.hpp"
  42 #include "gc_implementation/shared/gcTrace.hpp"
  43 #include "gc_implementation/shared/gcTraceTime.hpp"
  44 #include "memory/allocation.hpp"
  45 #include "memory/genOopClosures.inline.hpp"
  46 #include "memory/referencePolicy.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "runtime/handles.inline.hpp"
  50 #include "runtime/java.hpp"
  51 #include "runtime/atomic.inline.hpp"
  52 #include "runtime/prefetch.inline.hpp"
  53 #include "services/memTracker.hpp"
  54 
  55 // Concurrent marking bit map wrapper
  56 
  57 CMBitMapRO::CMBitMapRO(int shifter) :
  58   _bm(),
  59   _shifter(shifter) {
  60   _bmStartWord = 0;
  61   _bmWordSize = 0;
  62 }
  63 
  64 HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  65                                                const HeapWord* limit) const {
  66   // First we must round addr *up* to a possible object boundary.
  67   addr = (HeapWord*)align_size_up((intptr_t)addr,
  68                                   HeapWordSize << _shifter);
  69   size_t addrOffset = heapWordToOffset(addr);
  70   if (limit == NULL) {
  71     limit = _bmStartWord + _bmWordSize;
  72   }
  73   size_t limitOffset = heapWordToOffset(limit);
  74   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  75   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  76   assert(nextAddr >= addr, "get_next_one postcondition");
  77   assert(nextAddr == limit || isMarked(nextAddr),
  78          "get_next_one postcondition");
  79   return nextAddr;
  80 }
  81 
  82 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
  83                                                  const HeapWord* limit) const {
  84   size_t addrOffset = heapWordToOffset(addr);
  85   if (limit == NULL) {
  86     limit = _bmStartWord + _bmWordSize;
  87   }
  88   size_t limitOffset = heapWordToOffset(limit);
  89   size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  90   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  91   assert(nextAddr >= addr, "get_next_zero postcondition");
  92   assert(nextAddr == limit || !isMarked(nextAddr),
  93          "get_next_zero postcondition");
  94   return nextAddr;
  95 }
  96 
  97 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  98   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  99   return (int) (diff >> _shifter);
 100 }
 101 
 102 #ifndef PRODUCT
 103 bool CMBitMapRO::covers(MemRegion heap_rs) const {
 104   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
 105   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
 106          "size inconsistency");
 107   return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
 108          _bmWordSize  == heap_rs.word_size();
 109 }
 110 #endif
 111 
 112 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
 113   _bm.print_on_error(st, prefix);
 114 }
 115 
 116 size_t CMBitMap::compute_size(size_t heap_size) {
 117   return heap_size / mark_distance();
 118 }
 119 
 120 size_t CMBitMap::mark_distance() {
 121   return MinObjAlignmentInBytes * BitsPerByte;
 122 }
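// Example: with the usual 8-byte minimum object alignment, mark_distance() is
// MinObjAlignmentInBytes * BitsPerByte = 8 * 8 = 64, i.e. one bitmap bit covers
// 64 bytes of heap, so compute_size() returns heap_size / 64 bytes for the
// bitmap backing store - roughly 64 MB of bitmap for a 4 GB heap.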
 123 
 124 void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
 125   _bmStartWord = heap.start();
 126   _bmWordSize = heap.word_size();
 127 
 128   _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
 129   _bm.set_size(_bmWordSize >> _shifter);
 130 
 131   storage->set_mapping_changed_listener(&_listener);
 132 }
 133 
 134 void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions) {
 135   // We need to clear the bitmap on commit, removing any existing information.
 136   MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
 137   _bm->clearRange(mr);
 138 }
 139 
 140 // Closure used for clearing the given mark bitmap.
 141 class ClearBitmapHRClosure : public HeapRegionClosure {
 142  private:
 143   ConcurrentMark* _cm;
 144   CMBitMap* _bitmap;
 145   bool _may_yield;      // The closure may yield during iteration. If it yields and marking has been aborted, the iteration aborts.
 146  public:
 147   ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
 148     assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
 149   }
 150 
 151   virtual bool doHeapRegion(HeapRegion* r) {
 152     size_t const chunk_size_in_words = M / HeapWordSize;
 153 
 154     HeapWord* cur = r->bottom();
 155     HeapWord* const end = r->end();
 156 
 157     while (cur < end) {
 158       MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 159       _bitmap->clearRange(mr);
 160 
 161       cur += chunk_size_in_words;
 162 
 163       // Abort iteration if after yielding the marking has been aborted.
 164       if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
 165         return true;
 166       }
 167       // Repeat the asserts from before the start of the closure. We will do them
 168       // as asserts here to minimize their overhead in product builds. However, we
 169       // will have them as guarantees at the beginning / end of the bitmap
 170       // clearing to get some checking in product builds.
 171       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
 172       assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
 173     }
 174 
 175     return false;
 176   }
 177 };
 178 
 179 void CMBitMap::clearAll() {
 180   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
 181   G1CollectedHeap::heap()->heap_region_iterate(&cl);
 182   guarantee(cl.complete(), "Must have completed iteration.");
 183   return;
 184 }
 185 
 186 void CMBitMap::markRange(MemRegion mr) {
 187   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 188   assert(!mr.is_empty(), "unexpected empty region");
 189   assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 190           ((HeapWord *) mr.end())),
 191          "markRange memory region end is not card aligned");
 192   // convert address range into offset range
 193   _bm.at_put_range(heapWordToOffset(mr.start()),
 194                    heapWordToOffset(mr.end()), true);
 195 }
 196 
 197 void CMBitMap::clearRange(MemRegion mr) {
 198   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 199   assert(!mr.is_empty(), "unexpected empty region");
 200   // convert address range into offset range
 201   _bm.at_put_range(heapWordToOffset(mr.start()),
 202                    heapWordToOffset(mr.end()), false);
 203 }
 204 
 205 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
 206                                             HeapWord* end_addr) {
 207   HeapWord* start = getNextMarkedWordAddress(addr);
 208   start = MIN2(start, end_addr);
 209   HeapWord* end   = getNextUnmarkedWordAddress(start);
 210   end = MIN2(end, end_addr);
 211   assert(start <= end, "Consistency check");
 212   MemRegion mr(start, end);
 213   if (!mr.is_empty()) {
 214     clearRange(mr);
 215   }
 216   return mr;
 217 }
 218 
 219 CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
 220   _base(NULL), _cm(cm)
 221 #ifdef ASSERT
 222   , _drain_in_progress(false)
 223   , _drain_in_progress_yields(false)
 224 #endif
 225 {}
 226 
 227 bool CMMarkStack::allocate(size_t capacity) {
 228   // allocate a stack of the requisite depth
 229   ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
 230   if (!rs.is_reserved()) {
 231     warning("ConcurrentMark MarkStack allocation failure");
 232     return false;
 233   }
 234   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 235   if (!_virtual_space.initialize(rs, rs.size())) {
 236     warning("ConcurrentMark MarkStack backing store failure");
 237     // Release the virtual memory reserved for the marking stack
 238     rs.release();
 239     return false;
 240   }
 241   assert(_virtual_space.committed_size() == rs.size(),
 242          "Didn't reserve backing store for all of ConcurrentMark stack?");
 243   _base = (oop*) _virtual_space.low();
 244   setEmpty();
 245   _capacity = (jint) capacity;
 246   _saved_index = -1;
 247   _should_expand = false;
 248   NOT_PRODUCT(_max_depth = 0);
 249   return true;
 250 }
 251 
 252 void CMMarkStack::expand() {
 253   // Called during remark if we have overflowed the marking stack during marking.
 254   assert(isEmpty(), "stack should have been emptied while handling overflow");
 255   assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
 256   // Clear expansion flag
 257   _should_expand = false;
 258   if (_capacity == (jint) MarkStackSizeMax) {
 259     if (PrintGCDetails && Verbose) {
 260       gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
 261     }
 262     return;
 263   }
 264   // Double capacity if possible
 265   jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
 266   // Do not give up the existing stack until we have managed to
 267   // get the doubled capacity that we want.
 268   ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
 269                                                            sizeof(oop)));
 270   if (rs.is_reserved()) {
 271     // Release the backing store associated with old stack
 272     _virtual_space.release();
 273     // Reinitialize virtual space for new stack
 274     if (!_virtual_space.initialize(rs, rs.size())) {
 275       fatal("Not enough swap for expanded marking stack capacity");
 276     }
 277     _base = (oop*)(_virtual_space.low());
 278     _index = 0;
 279     _capacity = new_capacity;
 280   } else {
 281     if (PrintGCDetails && Verbose) {
 282       // Failed to double capacity, continue;
 283       gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
 284                           SIZE_FORMAT"K to " SIZE_FORMAT"K",
 285                           _capacity / K, new_capacity / K);
 286     }
 287   }
 288 }
 289 
 290 void CMMarkStack::set_should_expand() {
 291   // If we're resetting the marking state because of a
 292   // marking stack overflow, record that we should, if
 293   // possible, expand the stack.
 294   _should_expand = _cm->has_overflown();
 295 }
 296 
 297 CMMarkStack::~CMMarkStack() {
 298   if (_base != NULL) {
 299     _base = NULL;
 300     _virtual_space.release();
 301   }
 302 }
 303 
 304 void CMMarkStack::par_push(oop ptr) {
 305   while (true) {
 306     if (isFull()) {
 307       _overflow = true;
 308       return;
 309     }
 310     // Otherwise...
 311     jint index = _index;
 312     jint next_index = index+1;
 313     jint res = Atomic::cmpxchg(next_index, &_index, index);
 314     if (res == index) {
 315       _base[index] = ptr;
 316       // Note that we don't maintain this atomically.  We could, but it
 317       // doesn't seem necessary.
 318       NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 319       return;
 320     }
 321     // Otherwise, we need to try again.
 322   }
 323 }
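// The push above first claims a slot by CAS-ing _index forward and only then
// stores the oop into the claimed slot, so concurrent pushers never write to
// the same slot. A caller would typically push and then check for overflow,
// along these lines (a sketch only; the caller-side names are hypothetical):
//
//   global_stack->par_push(obj);
//   if (global_stack->overflow()) {
//     // note the overflow and arrange for marking to restart with a larger stack
//   }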
 324 
 325 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
 326   while (true) {
 327     if (isFull()) {
 328       _overflow = true;
 329       return;
 330     }
 331     // Otherwise...
 332     jint index = _index;
 333     jint next_index = index + n;
 334     if (next_index > _capacity) {
 335       _overflow = true;
 336       return;
 337     }
 338     jint res = Atomic::cmpxchg(next_index, &_index, index);
 339     if (res == index) {
 340       for (int i = 0; i < n; i++) {
 341         int  ind = index + i;
 342         assert(ind < _capacity, "By overflow test above.");
 343         _base[ind] = ptr_arr[i];
 344       }
 345       NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 346       return;
 347     }
 348     // Otherwise, we need to try again.
 349   }
 350 }
 351 
 352 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 353   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 354   jint start = _index;
 355   jint next_index = start + n;
 356   if (next_index > _capacity) {
 357     _overflow = true;
 358     return;
 359   }
 360   // Otherwise.
 361   _index = next_index;
 362   for (int i = 0; i < n; i++) {
 363     int ind = start + i;
 364     assert(ind < _capacity, "By overflow test above.");
 365     _base[ind] = ptr_arr[i];
 366   }
 367   NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 368 }
 369 
 370 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
 371   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 372   jint index = _index;
 373   if (index == 0) {
 374     *n = 0;
 375     return false;
 376   } else {
 377     int k = MIN2(max, index);
 378     jint  new_ind = index - k;
 379     for (int j = 0; j < k; j++) {
 380       ptr_arr[j] = _base[new_ind + j];
 381     }
 382     _index = new_ind;
 383     *n = k;
 384     return true;
 385   }
 386 }
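// Bulk transfers between a task's local buffers and this global stack go
// through the *_arr variants above. A draining loop might look like this
// (a sketch; the buffer size and surrounding code are illustrative only):
//
//   oop buffer[64];
//   int n;
//   while (global_stack->par_pop_arr(buffer, 64, &n)) {
//     for (int i = 0; i < n; i++) {
//       // scan buffer[i]
//     }
//   }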
 387 
 388 template<class OopClosureClass>
 389 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
 390   assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
 391          || SafepointSynchronize::is_at_safepoint(),
 392          "Drain recursion must be yield-safe.");
 393   bool res = true;
 394   debug_only(_drain_in_progress = true);
 395   debug_only(_drain_in_progress_yields = yield_after);
 396   while (!isEmpty()) {
 397     oop newOop = pop();
 398     assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
 399     assert(newOop->is_oop(), "Expected an oop");
 400     assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
 401            "only grey objects on this stack");
 402     newOop->oop_iterate(cl);
 403     if (yield_after && _cm->do_yield_check()) {
 404       res = false;
 405       break;
 406     }
 407   }
 408   debug_only(_drain_in_progress = false);
 409   return res;
 410 }
 411 
 412 void CMMarkStack::note_start_of_gc() {
 413   assert(_saved_index == -1,
 414          "note_start_of_gc()/end_of_gc() bracketed incorrectly");
 415   _saved_index = _index;
 416 }
 417 
 418 void CMMarkStack::note_end_of_gc() {
 419   // This is intentionally a guarantee, instead of an assert. If we
 420   // accidentally add something to the mark stack during GC, it
 421   // will be a correctness issue, so it's better if we crash. We'll
 422   // only check this once per GC anyway, so it won't be a performance
 423   // issue in any way.
 424   guarantee(_saved_index == _index,
 425             err_msg("saved index: %d index: %d", _saved_index, _index));
 426   _saved_index = -1;
 427 }
 428 
 429 void CMMarkStack::oops_do(OopClosure* f) {
 430   assert(_saved_index == _index,
 431          err_msg("saved index: %d index: %d", _saved_index, _index));
 432   for (int i = 0; i < _index; i += 1) {
 433     f->do_oop(&_base[i]);
 434   }
 435 }
 436 
 437 CMRootRegions::CMRootRegions() :
 438   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
 439   _should_abort(false),  _next_survivor(NULL) { }
 440 
 441 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
 442   _young_list = g1h->young_list();
 443   _cm = cm;
 444 }
 445 
 446 void CMRootRegions::prepare_for_scan() {
 447   assert(!scan_in_progress(), "pre-condition");
 448 
 449   // Currently, only survivors can be root regions.
 450   assert(_next_survivor == NULL, "pre-condition");
 451   _next_survivor = _young_list->first_survivor_region();
 452   _scan_in_progress = (_next_survivor != NULL);
 453   _should_abort = false;
 454 }
 455 
 456 HeapRegion* CMRootRegions::claim_next() {
 457   if (_should_abort) {
 458     // If someone has set the should_abort flag, we return NULL to
 459     // force the caller to bail out of their loop.
 460     return NULL;
 461   }
 462 
 463   // Currently, only survivors can be root regions.
 464   HeapRegion* res = _next_survivor;
 465   if (res != NULL) {
 466     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 467     // Read it again in case it changed while we were waiting for the lock.
 468     res = _next_survivor;
 469     if (res != NULL) {
 470       if (res == _young_list->last_survivor_region()) {
 471         // We just claimed the last survivor so store NULL to indicate
 472         // that we're done.
 473         _next_survivor = NULL;
 474       } else {
 475         _next_survivor = res->get_next_young_region();
 476       }
 477     } else {
 478       // Someone else claimed the last survivor while we were trying
 479       // to take the lock so nothing else to do.
 480     }
 481   }
 482   assert(res == NULL || res->is_survivor(), "post-condition");
 483 
 484   return res;
 485 }
 486 
 487 void CMRootRegions::scan_finished() {
 488   assert(scan_in_progress(), "pre-condition");
 489 
 490   // Currently, only survivors can be root regions.
 491   if (!_should_abort) {
 492     assert(_next_survivor == NULL, "we should have claimed all survivors");
 493   }
 494   _next_survivor = NULL;
 495 
 496   {
 497     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 498     _scan_in_progress = false;
 499     RootRegionScan_lock->notify_all();
 500   }
 501 }
 502 
 503 bool CMRootRegions::wait_until_scan_finished() {
 504   if (!scan_in_progress()) return false;
 505 
 506   {
 507     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 508     while (scan_in_progress()) {
 509       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 510     }
 511   }
 512   return true;
 513 }
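// wait_until_scan_finished() is what an evacuation pause uses to make sure
// root region scanning has completed before evacuation starts; objects in the
// root regions could otherwise be moved out from under the scanning threads.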
 514 
 515 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 516 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 517 #endif // _MSC_VER
 518 
 519 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 520   return MAX2((n_par_threads + 2) / 4, 1U);
 521 }
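// Example: with 8 parallel GC threads this yields (8 + 2) / 4 = 2 concurrent
// marking threads, i.e. roughly a quarter of ParallelGCThreads with a floor of 1.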
 522 
 523 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
 524   _g1h(g1h),
 525   _markBitMap1(),
 526   _markBitMap2(),
 527   _parallel_marking_threads(0),
 528   _max_parallel_marking_threads(0),
 529   _sleep_factor(0.0),
 530   _marking_task_overhead(1.0),
 531   _cleanup_sleep_factor(0.0),
 532   _cleanup_task_overhead(1.0),
 533   _cleanup_list("Cleanup List"),
 534   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
 535   _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
 536             CardTableModRefBS::card_shift,
 537             false /* in_resource_area*/),
 538 
 539   _prevMarkBitMap(&_markBitMap1),
 540   _nextMarkBitMap(&_markBitMap2),
 541 
 542   _markStack(this),
 543   // _finger set in set_non_marking_state
 544 
 545   _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
 546   // _active_tasks set in set_non_marking_state
 547   // _tasks set inside the constructor
 548   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
 549   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 550 
 551   _has_overflown(false),
 552   _concurrent(false),
 553   _has_aborted(false),
 554   _aborted_gc_id(GCId::undefined()),
 555   _restart_for_overflow(false),
 556   _concurrent_marking_in_progress(false),
 557 
 558   // _verbose_level set below
 559 
 560   _init_times(),
 561   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 562   _cleanup_times(),
 563   _total_counting_time(0.0),
 564   _total_rs_scrub_time(0.0),
 565 
 566   _parallel_workers(NULL),
 567 
 568   _count_card_bitmaps(NULL),
 569   _count_marked_bytes(NULL),
 570   _completed_initialization(false) {
 571   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 572   if (verbose_level < no_verbose) {
 573     verbose_level = no_verbose;
 574   }
 575   if (verbose_level > high_verbose) {
 576     verbose_level = high_verbose;
 577   }
 578   _verbose_level = verbose_level;
 579 
 580   if (verbose_low()) {
 581     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
 582                            "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
 583   }
 584 
 585   _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 586   _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 587 
 588   // Create & start a ConcurrentMark thread.
 589   _cmThread = new ConcurrentMarkThread(this);
 590   assert(cmThread() != NULL, "CM Thread should have been created");
 591   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
 592   if (_cmThread->osthread() == NULL) {
 593       vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 594   }
 595 
 596   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 597   assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
 598   assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
 599 
 600   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 601   satb_qs.set_buffer_size(G1SATBBufferSize);
 602 
 603   _root_regions.init(_g1h, this);
 604 
 605   if (ConcGCThreads > ParallelGCThreads) {
 606     warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
 607             "than ParallelGCThreads (" UINTX_FORMAT ").",
 608             ConcGCThreads, ParallelGCThreads);
 609     return;
 610   }
 611   if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
 612     // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
 613     // if both are set
 614     _sleep_factor             = 0.0;
 615     _marking_task_overhead    = 1.0;
 616   } else if (G1MarkingOverheadPercent > 0) {
 617     // We will calculate the number of parallel marking threads based
 618     // on a target overhead with respect to the soft real-time goal
 619     double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
 620     double overall_cm_overhead =
 621       (double) MaxGCPauseMillis * marking_overhead /
 622       (double) GCPauseIntervalMillis;
 623     double cpu_ratio = 1.0 / (double) os::processor_count();
 624     double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
 625     double marking_task_overhead =
 626       overall_cm_overhead / marking_thread_num *
 627                                               (double) os::processor_count();
 628     double sleep_factor =
 629                        (1.0 - marking_task_overhead) / marking_task_overhead;
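    // Worked example (non-default settings, for illustration only): with
    // G1MarkingOverheadPercent=10, MaxGCPauseMillis=200,
    // GCPauseIntervalMillis=1000 and 8 processors:
    //   overall_cm_overhead   = 200 * 0.10 / 1000  = 0.02   (2% of total CPU)
    //   cpu_ratio             = 1 / 8              = 0.125
    //   marking_thread_num    = ceil(0.02 / 0.125) = 1
    //   marking_task_overhead = 0.02 / 1 * 8       = 0.16
    //   sleep_factor          = (1 - 0.16) / 0.16  = 5.25
    // i.e. one marking thread that sleeps about 5.25 times as long as it runs.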
 630 
 631     FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
 632     _sleep_factor             = sleep_factor;
 633     _marking_task_overhead    = marking_task_overhead;
 634   } else {
 635     // Calculate the number of parallel marking threads by scaling
 636     // the number of parallel GC threads.
 637     uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
 638     FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
 639     _sleep_factor             = 0.0;
 640     _marking_task_overhead    = 1.0;
 641   }
 642 
 643   assert(ConcGCThreads > 0, "Should have been set");
 644   _parallel_marking_threads = (uint) ConcGCThreads;
 645   _max_parallel_marking_threads = _parallel_marking_threads;
 646 
 647   if (parallel_marking_threads() > 1) {
 648     _cleanup_task_overhead = 1.0;
 649   } else {
 650     _cleanup_task_overhead = marking_task_overhead();
 651   }
 652   _cleanup_sleep_factor =
 653                    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
 654 
 655 #if 0
 656   gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
 657   gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
 658   gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
 659   gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
 660   gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
 661 #endif
 662 
 663   guarantee(parallel_marking_threads() > 0, "peace of mind");
 664   _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
 665        _max_parallel_marking_threads, false, true);
 666   if (_parallel_workers == NULL) {
 667     vm_exit_during_initialization("Failed necessary allocation.");
 668   } else {
 669     _parallel_workers->initialize_workers();
 670   }
 671 
 672   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 673     uintx mark_stack_size =
 674       MIN2(MarkStackSizeMax,
 675           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
 676     // Verify that the calculated value for MarkStackSize is in range.
 677     // It would be nice to use the private utility routine from Arguments.
 678     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 679       warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
 680               "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
 681               mark_stack_size, (uintx) 1, MarkStackSizeMax);
 682       return;
 683     }
 684     FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
 685   } else {
 686     // Verify MarkStackSize is in range.
 687     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 688       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 689         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 690           warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
 691                   "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
 692                   MarkStackSize, (uintx) 1, MarkStackSizeMax);
 693           return;
 694         }
 695       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 696         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 697           warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
 698                   " or for MarkStackSizeMax (" UINTX_FORMAT ")",
 699                   MarkStackSize, MarkStackSizeMax);
 700           return;
 701         }
 702       }
 703     }
 704   }
 705 
 706   if (!_markStack.allocate(MarkStackSize)) {
 707     warning("Failed to allocate CM marking stack");
 708     return;
 709   }
 710 
 711   _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
 712   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
 713 
 714   _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
 715   _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
 716 
 717   BitMap::idx_t card_bm_size = _card_bm.size();
 718 
 719   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 720   _active_tasks = _max_worker_id;
 721 
 722   size_t max_regions = (size_t) _g1h->max_regions();
 723   for (uint i = 0; i < _max_worker_id; ++i) {
 724     CMTaskQueue* task_queue = new CMTaskQueue();
 725     task_queue->initialize();
 726     _task_queues->register_queue(i, task_queue);
 727 
 728     _count_card_bitmaps[i] = BitMap(card_bm_size, false);
 729     _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
 730 
 731     _tasks[i] = new CMTask(i, this,
 732                            _count_marked_bytes[i],
 733                            &_count_card_bitmaps[i],
 734                            task_queue, _task_queues);
 735 
 736     _accum_task_vtime[i] = 0.0;
 737   }
 738 
 739   // Calculate the card number for the bottom of the heap. Used
 740   // in biasing indexes into the accounting card bitmaps.
 741   _heap_bottom_card_num =
 742     intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
 743                                 CardTableModRefBS::card_shift);
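  // For example, with the standard 512-byte cards (card_shift == 9) and a heap
  // reserved at 0x0000000700000000 (an illustrative base address), this is
  // 0x700000000 >> 9 == 0x3800000; subtracting it from an address's card number
  // makes the first card of the heap map to index 0 of the counting bitmaps.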
 744 
 745   // Clear all the liveness counting data
 746   clear_all_count_data();
 747 
 748   // so that the call below can read a sensible value
 749   _heap_start = g1h->reserved_region().start();
 750   set_non_marking_state();
 751   _completed_initialization = true;
 752 }
 753 
 754 void ConcurrentMark::reset() {
 755   // Starting values for these two. This should be called in a STW
 756   // phase.
 757   MemRegion reserved = _g1h->g1_reserved();
 758   _heap_start = reserved.start();
 759   _heap_end   = reserved.end();
 760 
 761   // Separated the asserts so that we know which one fires.
 762   assert(_heap_start != NULL, "heap bounds should look ok");
 763   assert(_heap_end != NULL, "heap bounds should look ok");
 764   assert(_heap_start < _heap_end, "heap bounds should look ok");
 765 
 766   // Reset all the marking data structures and any necessary flags
 767   reset_marking_state();
 768 
 769   if (verbose_low()) {
 770     gclog_or_tty->print_cr("[global] resetting");
 771   }
 772 
 773   // We do reset all of them, since different phases will use
 774   // different numbers of active threads. So, it's easiest to have all
 775   // of them ready.
 776   for (uint i = 0; i < _max_worker_id; ++i) {
 777     _tasks[i]->reset(_nextMarkBitMap);
 778   }
 779 
 780   // we need this to make sure that the flag is on during the evacuation
 781   // pause that has the initial mark piggy-backed on it
 782   set_concurrent_marking_in_progress();
 783 }
 784 
 785 
 786 void ConcurrentMark::reset_marking_state(bool clear_overflow) {
 787   _markStack.set_should_expand();
 788   _markStack.setEmpty();        // Also clears the _markStack overflow flag
 789   if (clear_overflow) {
 790     clear_has_overflown();
 791   } else {
 792     assert(has_overflown(), "pre-condition");
 793   }
 794   _finger = _heap_start;
 795 
 796   for (uint i = 0; i < _max_worker_id; ++i) {
 797     CMTaskQueue* queue = _task_queues->queue(i);
 798     queue->set_empty();
 799   }
 800 }
 801 
 802 void ConcurrentMark::set_concurrency(uint active_tasks) {
 803   assert(active_tasks <= _max_worker_id, "we should not have more");
 804 
 805   _active_tasks = active_tasks;
 806   // Need to update the three data structures below according to the
 807   // number of active threads for this phase.
 808   _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
 809   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 810   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 811 }
 812 
 813 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 814   set_concurrency(active_tasks);
 815 
 816   _concurrent = concurrent;
 817   // We propagate this to all tasks, not just the active ones.
 818   for (uint i = 0; i < _max_worker_id; ++i)
 819     _tasks[i]->set_concurrent(concurrent);
 820 
 821   if (concurrent) {
 822     set_concurrent_marking_in_progress();
 823   } else {
 824     // We currently assume that the concurrent flag has been set to
 825     // false before we start remark. At this point we should also be
 826     // in a STW phase.
 827     assert(!concurrent_marking_in_progress(), "invariant");
 828     assert(out_of_regions(),
 829            err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
 830                    p2i(_finger), p2i(_heap_end)));
 831   }
 832 }
 833 
 834 void ConcurrentMark::set_non_marking_state() {
 835   // We set the global marking state to some default values when we're
 836   // not doing marking.
 837   reset_marking_state();
 838   _active_tasks = 0;
 839   clear_concurrent_marking_in_progress();
 840 }
 841 
 842 ConcurrentMark::~ConcurrentMark() {
 843   // The ConcurrentMark instance is never freed.
 844   ShouldNotReachHere();
 845 }
 846 
 847 void ConcurrentMark::clearNextBitmap() {
 848   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 849 
 850   // Make sure that the concurrent mark thread still appears to be in
 851   // the current cycle.
 852   guarantee(cmThread()->during_cycle(), "invariant");
 853 
 854   // We are finishing up the current cycle by clearing the next
 855   // marking bitmap and getting it ready for the next cycle. During
 856   // this time no other cycle can start. So, let's make sure that this
 857   // is the case.
 858   guarantee(!g1h->mark_in_progress(), "invariant");
 859 
 860   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
 861   g1h->heap_region_iterate(&cl);
 862 
 863   // Clear the liveness counting data. If the marking has been aborted, the abort()
 864   // call already did that.
 865   if (cl.complete()) {
 866     clear_all_count_data();
 867   }
 868 
 869   // Repeat the asserts from above.
 870   guarantee(cmThread()->during_cycle(), "invariant");
 871   guarantee(!g1h->mark_in_progress(), "invariant");
 872 }
 873 
 874 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 875   CMBitMap* _bitmap;
 876   bool _error;
 877  public:
 878   CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
 879   }
 880 
 881   virtual bool doHeapRegion(HeapRegion* r) {
 882     // This closure can be called concurrently with the mutator, so we must make sure
 883     // that the result of the getNextMarkedWordAddress() call is compared to the
 884     // value passed to it as limit to detect any found bits.
 885     // We can use the region's orig_end() for the limit and the comparison value
 886     // as it always contains the "real" end of the region that never changes and
 887     // has no side effects.
 888     // Due to the latter, there can also be no problem with the compiler generating
 889     // reloads of the orig_end() call.
 890     HeapWord* end = r->orig_end();
 891     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
 892   }
 893 };
 894 
 895 bool ConcurrentMark::nextMarkBitmapIsClear() {
 896   CheckBitmapClearHRClosure cl(_nextMarkBitMap);
 897   _g1h->heap_region_iterate(&cl);
 898   return cl.complete();
 899 }
 900 
 901 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 902 public:
 903   bool doHeapRegion(HeapRegion* r) {
 904     if (!r->is_continues_humongous()) {
 905       r->note_start_of_marking();
 906     }
 907     return false;
 908   }
 909 };
 910 
 911 void ConcurrentMark::checkpointRootsInitialPre() {
 912   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 913   G1CollectorPolicy* g1p = g1h->g1_policy();
 914 
 915   _has_aborted = false;
 916 
 917 #ifndef PRODUCT
 918   if (G1PrintReachableAtInitialMark) {
 919     print_reachable("at-cycle-start",
 920                     VerifyOption_G1UsePrevMarking, true /* all */);
 921   }
 922 #endif
 923 
 924   // Initialize marking structures. This has to be done in a STW phase.
 925   reset();
 926 
 927   // For each region note start of marking.
 928   NoteStartOfMarkHRClosure startcl;
 929   g1h->heap_region_iterate(&startcl);
 930 }
 931 
 932 
 933 void ConcurrentMark::checkpointRootsInitialPost() {
 934   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 935 
 936   // If we force an overflow during remark, the remark operation will
 937   // actually abort and we'll restart concurrent marking. If we always
 938   // force an overflow during remark we'll never actually complete the
 939   // marking phase. So, we initialize this here, at the start of the
 940   // cycle, so that the remaining overflow count will decrease at
 941   // every remark and we'll eventually not need to cause one.
 942   force_overflow_stw()->init();
 943 
 944   // Start Concurrent Marking weak-reference discovery.
 945   ReferenceProcessor* rp = g1h->ref_processor_cm();
 946   // enable ("weak") refs discovery
 947   rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
 948   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 949 
 950   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 951   // This is the start of the marking cycle; we expect all
 952   // threads to have SATB queues with active set to false.
 953   satb_mq_set.set_active_all_threads(true, /* new active value */
 954                                      false /* expected_active */);
 955 
 956   _root_regions.prepare_for_scan();
 957 
 958   // update_g1_committed() will be called at the end of an evac pause
 959   // when marking is on. So, it's also called at the end of the
 960   // initial-mark pause to update the heap end, if the heap expands
 961   // during it. No need to call it here.
 962 }
 963 
 964 /*
 965  * Notice that in the next two methods, we actually leave the STS
 966  * during the barrier sync and join it immediately afterwards. If we
 967  * do not do this, the following deadlock can occur: one thread could
 968  * be in the barrier sync code, waiting for the other thread to also
 969  * sync up, whereas another one could be trying to yield, while also
 970  * waiting for the other threads to sync up too.
 971  *
 972  * Note, however, that this code is also used during remark and in
 973  * this case we should not attempt to leave / enter the STS, otherwise
 974  * we'll either hit an assert (debug / fastdebug) or deadlock
 975  * (product). So we should only leave / enter the STS if we are
 976  * operating concurrently.
 977  *
 978  * Because the thread that does the sync barrier has left the STS, it
 979  * is possible for it to be suspended while a Full GC or an evacuation
 980  * pause occurs. This is actually safe, since entering the sync
 981  * barrier is one of the last things do_marking_step() does, and it
 982  * doesn't manipulate any data structures afterwards.
 983  */
 984 
 985 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 986   if (verbose_low()) {
 987     gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
 988   }
 989 
 990   if (concurrent()) {
 991     SuspendibleThreadSet::leave();
 992   }
 993 
 994   bool barrier_aborted = !_first_overflow_barrier_sync.enter();
 995 
 996   if (concurrent()) {
 997     SuspendibleThreadSet::join();
 998   }
 999   // at this point everyone should have synced up and not be doing any
1000   // more work
1001 
1002   if (verbose_low()) {
1003     if (barrier_aborted) {
1004       gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
1005     } else {
1006       gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
1007     }
1008   }
1009 
1010   if (barrier_aborted) {
1011     // If the barrier aborted we ignore the overflow condition and
1012     // just abort the whole marking phase as quickly as possible.
1013     return;
1014   }
1015 
1016   // If we're executing the concurrent phase of marking, reset the marking
1017   // state; otherwise the marking state is reset after reference processing,
1018   // during the remark pause.
1019   // If we reset here as a result of an overflow during the remark we will
1020   // see assertion failures from any subsequent set_concurrency_and_phase()
1021   // calls.
1022   if (concurrent()) {
1023     // let the task associated with worker 0 do this
1024     if (worker_id == 0) {
1025       // task 0 is responsible for clearing the global data structures
1026       // We should be here because of an overflow. During STW we should
1027       // not clear the overflow flag since we rely on it being true when
1028       // we exit this method to abort the pause and restart concurrent
1029       // marking.
1030       reset_marking_state(true /* clear_overflow */);
1031       force_overflow()->update();
1032 
1033       if (G1Log::fine()) {
1034         gclog_or_tty->gclog_stamp(concurrent_gc_id());
1035         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
1036       }
1037     }
1038   }
1039 
1040   // after this, each task should reset its own data structures and
1041   // then go into the second barrier
1042 }
1043 
1044 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
1045   if (verbose_low()) {
1046     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1047   }
1048 
1049   if (concurrent()) {
1050     SuspendibleThreadSet::leave();
1051   }
1052 
1053   bool barrier_aborted = !_second_overflow_barrier_sync.enter();
1054 
1055   if (concurrent()) {
1056     SuspendibleThreadSet::join();
1057   }
1058   // at this point everything should be re-initialized and ready to go
1059 
1060   if (verbose_low()) {
1061     if (barrier_aborted) {
1062       gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
1063     } else {
1064       gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
1065     }
1066   }
1067 }
1068 
1069 #ifndef PRODUCT
1070 void ForceOverflowSettings::init() {
1071   _num_remaining = G1ConcMarkForceOverflow;
1072   _force = false;
1073   update();
1074 }
1075 
1076 void ForceOverflowSettings::update() {
1077   if (_num_remaining > 0) {
1078     _num_remaining -= 1;
1079     _force = true;
1080   } else {
1081     _force = false;
1082   }
1083 }
1084 
1085 bool ForceOverflowSettings::should_force() {
1086   if (_force) {
1087     _force = false;
1088     return true;
1089   } else {
1090     return false;
1091   }
1092 }
1093 #endif // !PRODUCT
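// For example, a debug build run with -XX:G1ConcMarkForceOverflow=3 (a develop
// flag, so not available in product builds) makes should_force() return true for
// the first few marking attempts, artificially triggering the mark stack
// overflow / restart-for-overflow path during concurrent marking and remark.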
1094 
1095 class CMConcurrentMarkingTask: public AbstractGangTask {
1096 private:
1097   ConcurrentMark*       _cm;
1098   ConcurrentMarkThread* _cmt;
1099 
1100 public:
1101   void work(uint worker_id) {
1102     assert(Thread::current()->is_ConcurrentGC_thread(),
1103            "this should only be done by a conc GC thread");
1104     ResourceMark rm;
1105 
1106     double start_vtime = os::elapsedVTime();
1107 
1108     SuspendibleThreadSet::join();
1109 
1110     assert(worker_id < _cm->active_tasks(), "invariant");
1111     CMTask* the_task = _cm->task(worker_id);
1112     the_task->record_start_time();
1113     if (!_cm->has_aborted()) {
1114       do {
1115         double start_vtime_sec = os::elapsedVTime();
1116         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1117 
1118         the_task->do_marking_step(mark_step_duration_ms,
1119                                   true  /* do_termination */,
1120                                   false /* is_serial*/);
1121 
1122         double end_vtime_sec = os::elapsedVTime();
1123         double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
1124         _cm->clear_has_overflown();
1125 
1126         _cm->do_yield_check(worker_id);
1127 
1128         jlong sleep_time_ms;
1129         if (!_cm->has_aborted() && the_task->has_aborted()) {
1130           sleep_time_ms =
1131             (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
1132           SuspendibleThreadSet::leave();
1133           os::sleep(Thread::current(), sleep_time_ms, false);
1134           SuspendibleThreadSet::join();
1135         }
1136       } while (!_cm->has_aborted() && the_task->has_aborted());
1137     }
1138     the_task->record_end_time();
1139     guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1140 
1141     SuspendibleThreadSet::leave();
1142 
1143     double end_vtime = os::elapsedVTime();
1144     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
1145   }
1146 
1147   CMConcurrentMarkingTask(ConcurrentMark* cm,
1148                           ConcurrentMarkThread* cmt) :
1149       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1150 
1151   ~CMConcurrentMarkingTask() { }
1152 };
1153 
1154 // Calculates the number of active workers for a concurrent
1155 // phase.
1156 uint ConcurrentMark::calc_parallel_marking_threads() {
1157   uint n_conc_workers = 0;
1158   if (!UseDynamicNumberOfGCThreads ||
1159       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
1160        !ForceDynamicNumberOfGCThreads)) {
1161     n_conc_workers = max_parallel_marking_threads();
1162   } else {
1163     n_conc_workers =
1164       AdaptiveSizePolicy::calc_default_active_workers(
1165                                    max_parallel_marking_threads(),
1166                                    1, /* Minimum workers */
1167                                    parallel_marking_threads(),
1168                                    Threads::number_of_non_daemon_threads());
1169     // Don't scale down "n_conc_workers" by scale_parallel_threads() because
1170     // that scaling has already gone into "_max_parallel_marking_threads".
1171   }
1172   assert(n_conc_workers > 0, "Always need at least 1");
1173   return n_conc_workers;
1174 }
1175 
1176 void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
1177   // Currently, only survivors can be root regions.
1178   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
1179   G1RootRegionScanClosure cl(_g1h, this, worker_id);
1180 
1181   const uintx interval = PrefetchScanIntervalInBytes;
1182   HeapWord* curr = hr->bottom();
1183   const HeapWord* end = hr->top();
1184   while (curr < end) {
1185     Prefetch::read(curr, interval);
1186     oop obj = oop(curr);
1187     int size = obj->oop_iterate(&cl);
1188     assert(size == obj->size(), "sanity");
1189     curr += size;
1190   }
1191 }
1192 
1193 class CMRootRegionScanTask : public AbstractGangTask {
1194 private:
1195   ConcurrentMark* _cm;
1196 
1197 public:
1198   CMRootRegionScanTask(ConcurrentMark* cm) :
1199     AbstractGangTask("Root Region Scan"), _cm(cm) { }
1200 
1201   void work(uint worker_id) {
1202     assert(Thread::current()->is_ConcurrentGC_thread(),
1203            "this should only be done by a conc GC thread");
1204 
1205     CMRootRegions* root_regions = _cm->root_regions();
1206     HeapRegion* hr = root_regions->claim_next();
1207     while (hr != NULL) {
1208       _cm->scanRootRegion(hr, worker_id);
1209       hr = root_regions->claim_next();
1210     }
1211   }
1212 };
1213 
1214 void ConcurrentMark::scanRootRegions() {
1215   // Start of concurrent marking.
1216   ClassLoaderDataGraph::clear_claimed_marks();
1217 
1218   // scan_in_progress() will have been set to true only if there was
1219   // at least one root region to scan. So, if it's false, we
1220   // should not attempt to do any further work.
1221   if (root_regions()->scan_in_progress()) {
1222     _parallel_marking_threads = calc_parallel_marking_threads();
1223     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1224            "Maximum number of marking threads exceeded");
1225     uint active_workers = MAX2(1U, parallel_marking_threads());
1226 
1227     CMRootRegionScanTask task(this);
1228     if (use_parallel_marking_threads()) {
1229       _parallel_workers->set_active_workers((int) active_workers);
1230       _parallel_workers->run_task(&task);
1231     } else {
1232       task.work(0);
1233     }
1234 
1235     // It's possible that has_aborted() is true here without actually
1236     // aborting the survivor scan earlier. This is OK as it's
1237     // mainly used for sanity checking.
1238     root_regions()->scan_finished();
1239   }
1240 }
1241 
1242 void ConcurrentMark::markFromRoots() {
1243   // we might be tempted to assert that:
1244   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1245   //        "inconsistent argument?");
1246   // However that wouldn't be right, because it's possible that
1247   // a safepoint is indeed in progress as a younger generation
1248   // stop-the-world GC happens even as we mark in this generation.
1249 
1250   _restart_for_overflow = false;
1251   force_overflow_conc()->init();
1252 
1253   // _g1h has _n_par_threads
1254   _parallel_marking_threads = calc_parallel_marking_threads();
1255   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1256     "Maximum number of marking threads exceeded");
1257 
1258   uint active_workers = MAX2(1U, parallel_marking_threads());
1259 
1260   // Parallel task terminator is set in "set_concurrency_and_phase()"
1261   set_concurrency_and_phase(active_workers, true /* concurrent */);
1262 
1263   CMConcurrentMarkingTask markingTask(this, cmThread());
1264   if (use_parallel_marking_threads()) {
1265     _parallel_workers->set_active_workers((int)active_workers);
1266     // Don't set _n_par_threads because it affects MT in process_roots()
1267     // and the decisions on that MT processing are made elsewhere.
1268     assert(_parallel_workers->active_workers() > 0, "Should have been set");
1269     _parallel_workers->run_task(&markingTask);
1270   } else {
1271     markingTask.work(0);
1272   }
1273   print_stats();
1274 }
1275 
1276 // Helper class to get rid of some boilerplate code.
1277 class G1CMTraceTime : public GCTraceTime {
1278   static bool doit_and_prepend(bool doit) {
1279     if (doit) {
1280       gclog_or_tty->put(' ');
1281     }
1282     return doit;
1283   }
1284 
1285  public:
1286   G1CMTraceTime(const char* title, bool doit)
1287     : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
1288         G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
1289   }
1290 };
1291 
1292 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1293   // world is stopped at this checkpoint
1294   assert(SafepointSynchronize::is_at_safepoint(),
1295          "world should be stopped");
1296 
1297   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1298 
1299   // If a full collection has happened, we shouldn't do this.
1300   if (has_aborted()) {
1301     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1302     return;
1303   }
1304 
1305   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1306 
1307   if (VerifyDuringGC) {
1308     HandleMark hm;  // handle scope
1309     Universe::heap()->prepare_for_verify();
1310     Universe::verify(VerifyOption_G1UsePrevMarking,
1311                      " VerifyDuringGC:(before)");
1312   }
1313   g1h->check_bitmaps("Remark Start");
1314 
1315   G1CollectorPolicy* g1p = g1h->g1_policy();
1316   g1p->record_concurrent_mark_remark_start();
1317 
1318   double start = os::elapsedTime();
1319 
1320   checkpointRootsFinalWork();
1321 
1322   double mark_work_end = os::elapsedTime();
1323 
1324   weakRefsWork(clear_all_soft_refs);
1325 
1326   if (has_overflown()) {
1327     // Oops.  We overflowed.  Restart concurrent marking.
1328     _restart_for_overflow = true;
1329     if (G1TraceMarkStackOverflow) {
1330       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1331     }
1332 
1333     // Verify the heap w.r.t. the previous marking bitmap.
1334     if (VerifyDuringGC) {
1335       HandleMark hm;  // handle scope
1336       Universe::heap()->prepare_for_verify();
1337       Universe::verify(VerifyOption_G1UsePrevMarking,
1338                        " VerifyDuringGC:(overflow)");
1339     }
1340 
1341     // Clear the marking state because we will be restarting
1342     // marking due to overflowing the global mark stack.
1343     reset_marking_state();
1344   } else {
1345     {
1346       G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
1347 
1348       // Aggregate the per-task counting data that we have accumulated
1349       // while marking.
1350       aggregate_count_data();
1351     }
1352 
1353     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1354     // We're done with marking.
1355     // This is the end of the marking cycle; we expect all
1356     // threads to have SATB queues with active set to true.
1357     satb_mq_set.set_active_all_threads(false, /* new active value */
1358                                        true /* expected_active */);
1359 
1360     if (VerifyDuringGC) {
1361       HandleMark hm;  // handle scope
1362       Universe::heap()->prepare_for_verify();
1363       Universe::verify(VerifyOption_G1UseNextMarking,
1364                        " VerifyDuringGC:(after)");
1365     }
1366     g1h->check_bitmaps("Remark End");
1367     assert(!restart_for_overflow(), "sanity");
1368     // Completely reset the marking state since marking completed
1369     set_non_marking_state();
1370   }
1371 
1372   // Expand the marking stack, if we have to and if we can.
1373   if (_markStack.should_expand()) {
1374     _markStack.expand();
1375   }
1376 
1377   // Statistics
1378   double now = os::elapsedTime();
1379   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1380   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1381   _remark_times.add((now - start) * 1000.0);
1382 
1383   g1p->record_concurrent_mark_remark_end();
1384 
1385   G1CMIsAliveClosure is_alive(g1h);
1386   g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1387 }
1388 
1389 // Base class of the closures that finalize and verify the
1390 // liveness counting data.
1391 class CMCountDataClosureBase: public HeapRegionClosure {
1392 protected:
1393   G1CollectedHeap* _g1h;
1394   ConcurrentMark* _cm;
1395   CardTableModRefBS* _ct_bs;
1396 
1397   BitMap* _region_bm;
1398   BitMap* _card_bm;
1399 
1400   // Takes a region that's not empty (i.e., it has at least one
1401   // live object in it) and sets its corresponding bit in the region
1402   // bitmap to 1. If the region is "starts humongous" it will also set
1403   // to 1 the bits in the region bitmap that correspond to its
1404   // associated "continues humongous" regions.
1405   void set_bit_for_region(HeapRegion* hr) {
1406     assert(!hr->is_continues_humongous(), "should have filtered those out");
1407 
1408     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1409     if (!hr->is_starts_humongous()) {
1410       // Normal (non-humongous) case: just set the bit.
1411       _region_bm->par_at_put(index, true);
1412     } else {
1413       // Starts humongous case: calculate how many regions are part of
1414       // this humongous region and then set the bit range.
1415       BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
1416       _region_bm->par_at_put_range(index, end_index, true);
1417     }
1418   }
1419 
1420 public:
1421   CMCountDataClosureBase(G1CollectedHeap* g1h,
1422                          BitMap* region_bm, BitMap* card_bm):
1423     _g1h(g1h), _cm(g1h->concurrent_mark()),
1424     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
1425     _region_bm(region_bm), _card_bm(card_bm) { }
1426 };
1427 
1428 // Closure that calculates the # live objects per region. Used
1429 // for verification purposes during the cleanup pause.
1430 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1431   CMBitMapRO* _bm;
1432   size_t _region_marked_bytes;
1433 
1434 public:
1435   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1436                          BitMap* region_bm, BitMap* card_bm) :
1437     CMCountDataClosureBase(g1h, region_bm, card_bm),
1438     _bm(bm), _region_marked_bytes(0) { }
1439 
1440   bool doHeapRegion(HeapRegion* hr) {
1441 
1442     if (hr->is_continues_humongous()) {
1443       // We will ignore these here and process them when their
1444       // associated "starts humongous" region is processed (see
1445       // set_bit_for_region()). Note that we cannot rely on their
1446       // associated "starts humongous" region to have its bit set to
1447       // 1 since, due to the region chunking in the parallel region
1448       // iteration, a "continues humongous" region might be visited
1449       // before its associated "starts humongous".
1450       return false;
1451     }
1452 
1453     HeapWord* ntams = hr->next_top_at_mark_start();
1454     HeapWord* start = hr->bottom();
1455 
1456     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1457            err_msg("Preconditions not met - "
1458                    "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1459                    p2i(start), p2i(ntams), p2i(hr->end())));
1460 
1461     // Find the first marked object at or after "start".
1462     start = _bm->getNextMarkedWordAddress(start, ntams);
1463 
1464     size_t marked_bytes = 0;
1465 
1466     while (start < ntams) {
1467       oop obj = oop(start);
1468       int obj_sz = obj->size();
1469       HeapWord* obj_end = start + obj_sz;
1470 
1471       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1472       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1473 
1474       // Note: if we're looking at the last region in the heap, obj_end
1475       // could actually be just beyond the end of the heap; end_idx
1476       // will then correspond to a (non-existent) card that is also
1477       // just beyond the heap.
1478       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1479         // end of object is not card aligned - increment to cover
1480         // all the cards spanned by the object
1481         end_idx += 1;
1482       }
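           // For example, with the default 512-byte cards, an object whose end
           // falls 100 bytes into some card C gets end_idx == index(C) from the
           // call above; the increment makes the half-open range
           // [start_idx, end_idx) also cover that last, partially spanned card.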
1483 
1484       // Set the bits in the card BM for the cards spanned by this object.
1485       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1486 
1487       // Add the size of this object to the number of marked bytes.
1488       marked_bytes += (size_t)obj_sz * HeapWordSize;
1489 
1490       // Find the next marked object after this one.
1491       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1492     }
1493 
1494     // Mark the allocated-since-marking portion...
1495     HeapWord* top = hr->top();
1496     if (ntams < top) {
1497       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1498       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1499 
1500       // Note: if we're looking at the last region in the heap, top
1501       // could actually be just beyond the end of the heap; end_idx
1502       // will then correspond to a (non-existent) card that is also
1503       // just beyond the heap.
1504       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1505         // top is not card aligned - increment to cover
1506         // all the cards spanned by the [ntams, top) range
1507         end_idx += 1;
1508       }
1509       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1510 
1511       // This definitely means the region has live objects.
1512       set_bit_for_region(hr);
1513     }
1514 
1515     // Update the live region bitmap.
1516     if (marked_bytes > 0) {
1517       set_bit_for_region(hr);
1518     }
1519 
1520     // Set the marked bytes for the current region so that
1521     // it can be queried by a calling verification routine
1522     _region_marked_bytes = marked_bytes;
1523 
1524     return false;
1525   }
1526 
1527   size_t region_marked_bytes() const { return _region_marked_bytes; }
1528 };
1529 
1530 // Heap region closure used for verifying the counting data
1531 // that was accumulated concurrently and aggregated during
1532 // the remark pause. This closure is applied to the heap
1533 // regions during the STW cleanup pause.
1534 
1535 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1536   G1CollectedHeap* _g1h;
1537   ConcurrentMark* _cm;
1538   CalcLiveObjectsClosure _calc_cl;
1539   BitMap* _region_bm;   // Region BM to be verified
1540   BitMap* _card_bm;     // Card BM to be verified
1541   bool _verbose;        // verbose output?
1542 
1543   BitMap* _exp_region_bm; // Expected Region BM values
1544   BitMap* _exp_card_bm;   // Expected card BM values
1545 
1546   int _failures;
1547 
1548 public:
1549   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1550                                 BitMap* region_bm,
1551                                 BitMap* card_bm,
1552                                 BitMap* exp_region_bm,
1553                                 BitMap* exp_card_bm,
1554                                 bool verbose) :
1555     _g1h(g1h), _cm(g1h->concurrent_mark()),
1556     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1557     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1558     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1559     _failures(0) { }
1560 
1561   int failures() const { return _failures; }
1562 
1563   bool doHeapRegion(HeapRegion* hr) {
1564     if (hr->is_continues_humongous()) {
1565       // We will ignore these here and process them when their
1566       // associated "starts humongous" region is processed (see
1567       // set_bit_for_region()). Note that we cannot rely on their
1568       // associated "starts humongous" region to have its bit set to
1569       // 1 since, due to the region chunking in the parallel region
1570       // iteration, a "continues humongous" region might be visited
1571       // before its associated "starts humongous".
1572       return false;
1573     }
1574 
1575     int failures = 0;
1576 
1577     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1578     // this region and set the corresponding bits in the expected region
1579     // and card bitmaps.
1580     bool res = _calc_cl.doHeapRegion(hr);
1581     assert(res == false, "should be continuing");
1582 
1583     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1584                     Mutex::_no_safepoint_check_flag);
1585 
1586     // Verify the marked bytes for this region.
1587     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1588     size_t act_marked_bytes = hr->next_marked_bytes();
1589 
1590     // We're not OK if expected marked bytes > actual marked bytes. It means
1591     // we have missed accounting for some objects during the actual marking.
1592     if (exp_marked_bytes > act_marked_bytes) {
1593       if (_verbose) {
1594         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1595                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1596                                hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
1597       }
1598       failures += 1;
1599     }
1600 
1601     // Verify the bit, for this region, in the actual and expected
1602     // (which was just calculated) region bit maps.
1603     // We're not OK if the bit in the calculated expected region
1604     // bitmap is set and the bit in the actual region bitmap is not.
1605     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1606 
1607     bool expected = _exp_region_bm->at(index);
1608     bool actual = _region_bm->at(index);
1609     if (expected && !actual) {
1610       if (_verbose) {
1611         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1612                                "expected: %s, actual: %s",
1613                                hr->hrm_index(),
1614                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1615       }
1616       failures += 1;
1617     }
1618 
1619     // Verify that the card bit maps for the cards spanned by the current
1620     // region match. We have an error if we have a set bit in the expected
1621     // bit map and the corresponding bit in the actual bitmap is not set.
1622 
1623     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1624     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1625 
1626     for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
1627       expected = _exp_card_bm->at(i);
1628       actual = _card_bm->at(i);
1629 
1630       if (expected && !actual) {
1631         if (_verbose) {
1632           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1633                                  "expected: %s, actual: %s",
1634                                  hr->hrm_index(), i,
1635                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1636         }
1637         failures += 1;
1638       }
1639     }
1640 
1641     if (failures > 0 && _verbose)  {
1642       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1643                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1644                              HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
1645                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1646     }
1647 
1648     _failures += failures;
1649 
1650     // We could stop iteration over the heap when we
1651     // find the first violating region by returning true.
1652     return false;
1653   }
1654 };
1655 
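     // Gang task that, in parallel over the heap regions, recomputes the
     // expected liveness counting data and compares it against the data
     // accumulated during marking, recording any mismatches as failures.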
1656 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1657 protected:
1658   G1CollectedHeap* _g1h;
1659   ConcurrentMark* _cm;
1660   BitMap* _actual_region_bm;
1661   BitMap* _actual_card_bm;
1662 
1663   uint    _n_workers;
1664 
1665   BitMap* _expected_region_bm;
1666   BitMap* _expected_card_bm;
1667 
1668   int  _failures;
1669   bool _verbose;
1670 
1671   HeapRegionClaimer _hrclaimer;
1672 
1673 public:
1674   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1675                             BitMap* region_bm, BitMap* card_bm,
1676                             BitMap* expected_region_bm, BitMap* expected_card_bm)
1677     : AbstractGangTask("G1 verify final counting"),
1678       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1679       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1680       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1681       _failures(0), _verbose(false),
1682       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1683     assert(VerifyDuringGC, "don't call this otherwise");
1684     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1685     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1686 
1687     _verbose = _cm->verbose_medium();
1688   }
1689 
1690   void work(uint worker_id) {
1691     assert(worker_id < _n_workers, "invariant");
1692 
1693     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1694                                             _actual_region_bm, _actual_card_bm,
1695                                             _expected_region_bm,
1696                                             _expected_card_bm,
1697                                             _verbose);
1698 
1699     _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);
1700 
1701     Atomic::add(verify_cl.failures(), &_failures);
1702   }
1703 
1704   int failures() const { return _failures; }
1705 };
1706 
1707 // Closure that finalizes the liveness counting data.
1708 // Used during the cleanup pause.
1709 // Sets the bits corresponding to the interval [NTAMS, top]
1710 // (which contains the implicitly live objects) in the
1711 // card liveness bitmap. Also sets the bit for each region,
1712 // containing live data, in the region liveness bitmap.
1713 
1714 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1715  public:
1716   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1717                               BitMap* region_bm,
1718                               BitMap* card_bm) :
1719     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1720 
1721   bool doHeapRegion(HeapRegion* hr) {
1722 
1723     if (hr->is_continues_humongous()) {
1724       // We will ignore these here and process them when their
1725       // associated "starts humongous" region is processed (see
1726       // set_bit_for_region()). Note that we cannot rely on their
1727       // associated "starts humongous" region to have its bit set to
1728       // 1 since, due to the region chunking in the parallel region
1729       // iteration, a "continues humongous" region might be visited
1730       // before its associated "starts humongous".
1731       return false;
1732     }
1733 
1734     HeapWord* ntams = hr->next_top_at_mark_start();
1735     HeapWord* top   = hr->top();
1736 
1737     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1738 
1739     // Mark the allocated-since-marking portion...
1740     if (ntams < top) {
1741       // This definitely means the region has live objects.
1742       set_bit_for_region(hr);
1743 
1744       // Now set the bits in the card bitmap for [ntams, top)
1745       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1746       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1747 
1748       // Note: if we're looking at the last region in the heap, top
1749       // could actually be just beyond the end of the heap; end_idx
1750       // will then correspond to a (non-existent) card that is also
1751       // just beyond the heap.
1752       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1753         // top is not card aligned - increment to cover
1754         // all the cards spanned by the [ntams, top) range
1755         end_idx += 1;
1756       }
1757 
1758       assert(end_idx <= _card_bm->size(),
1759              err_msg("oob: end_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1760                      end_idx, _card_bm->size()));
1761       assert(start_idx < _card_bm->size(),
1762              err_msg("oob: start_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1763                      start_idx, _card_bm->size()));
1764 
1765       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1766     }
1767 
1768     // Set the bit for the region if it contains live data
1769     if (hr->next_marked_bytes() > 0) {
1770       set_bit_for_region(hr);
1771     }
1772 
1773     return false;
1774   }
1775 };
1776 
1777 class G1ParFinalCountTask: public AbstractGangTask {
1778 protected:
1779   G1CollectedHeap* _g1h;
1780   ConcurrentMark* _cm;
1781   BitMap* _actual_region_bm;
1782   BitMap* _actual_card_bm;
1783 
1784   uint    _n_workers;
1785   HeapRegionClaimer _hrclaimer;
1786 
1787 public:
1788   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1789     : AbstractGangTask("G1 final counting"),
1790       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1791       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1792       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1793   }
1794 
1795   void work(uint worker_id) {
1796     assert(worker_id < _n_workers, "invariant");
1797 
1798     FinalCountDataUpdateClosure final_update_cl(_g1h,
1799                                                 _actual_region_bm,
1800                                                 _actual_card_bm);
1801 
1802     _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
1803   }
1804 };
1805 
1806 class G1ParNoteEndTask;
1807 
1808 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1809   G1CollectedHeap* _g1;
1810   size_t _max_live_bytes;
1811   uint _regions_claimed;
1812   size_t _freed_bytes;
1813   FreeRegionList* _local_cleanup_list;
1814   HeapRegionSetCount _old_regions_removed;
1815   HeapRegionSetCount _humongous_regions_removed;
1816   HRRSCleanupTask* _hrrs_cleanup_task;
1817   double _claimed_region_time;
1818   double _max_region_time;
1819 
1820 public:
1821   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1822                              FreeRegionList* local_cleanup_list,
1823                              HRRSCleanupTask* hrrs_cleanup_task) :
1824     _g1(g1),
1825     _max_live_bytes(0), _regions_claimed(0),
1826     _freed_bytes(0),
1827     _claimed_region_time(0.0), _max_region_time(0.0),
1828     _local_cleanup_list(local_cleanup_list),
1829     _old_regions_removed(),
1830     _humongous_regions_removed(),
1831     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1832 
1833   size_t freed_bytes() { return _freed_bytes; }
1834   const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1835   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1836 
1837   bool doHeapRegion(HeapRegion *hr) {
1838     if (hr->is_continues_humongous()) {
1839       return false;
1840     }
1841     // Reset the GC time stamps for this region before doing any
1842     // cleanup work on it.
1843     _g1->reset_gc_time_stamps(hr);
1844     double start = os::elapsedTime();
1845     _regions_claimed++;
1846     hr->note_end_of_marking();
1847     _max_live_bytes += hr->max_live_bytes();
1848 
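         // A non-young region that has used space but no live bytes after
         // marking is all garbage, so we can reclaim it here without
         // evacuation; otherwise we only clean up its remembered set.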
1849     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1850       _freed_bytes += hr->used();
1851       hr->set_containing_set(NULL);
1852       if (hr->is_humongous()) {
1853         assert(hr->is_starts_humongous(), "we should only see starts humongous");
1854         _humongous_regions_removed.increment(1u, hr->capacity());
1855         _g1->free_humongous_region(hr, _local_cleanup_list, true);
1856       } else {
1857         _old_regions_removed.increment(1u, hr->capacity());
1858         _g1->free_region(hr, _local_cleanup_list, true);
1859       }
1860     } else {
1861       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1862     }
1863 
1864     double region_time = (os::elapsedTime() - start);
1865     _claimed_region_time += region_time;
1866     if (region_time > _max_region_time) {
1867       _max_region_time = region_time;
1868     }
1869     return false;
1870   }
1871 
1872   size_t max_live_bytes() { return _max_live_bytes; }
1873   uint regions_claimed() { return _regions_claimed; }
1874   double claimed_region_time_sec() { return _claimed_region_time; }
1875   double max_region_time_sec() { return _max_region_time; }
1876 };
1877 
1878 class G1ParNoteEndTask: public AbstractGangTask {
1879   friend class G1NoteEndOfConcMarkClosure;
1880 
1881 protected:
1882   G1CollectedHeap* _g1h;
1883   size_t _max_live_bytes;
1884   size_t _freed_bytes;
1885   FreeRegionList* _cleanup_list;
1886   HeapRegionClaimer _hrclaimer;
1887 
1888 public:
1889   G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1890       AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
1891   }
1892 
1893   void work(uint worker_id) {
1894     double start = os::elapsedTime();
1895     FreeRegionList local_cleanup_list("Local Cleanup List");
1896     HRRSCleanupTask hrrs_cleanup_task;
1897     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1898                                            &hrrs_cleanup_task);
1899     _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
1900     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1901 
1902     // Now update the lists
1903     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1904     {
1905       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1906       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1907       _max_live_bytes += g1_note_end.max_live_bytes();
1908       _freed_bytes += g1_note_end.freed_bytes();
1909 
1910       // If we iterate over the global cleanup list at the end of
1911       // cleanup to do this printing we will not guarantee to only
1912       // generate output for the newly-reclaimed regions (the list
1913       // might not be empty at the beginning of cleanup; we might
1914       // still be working on its previous contents). So we do the
1915       // printing here, before we append the new regions to the global
1916       // cleanup list.
1917 
1918       G1HRPrinter* hr_printer = _g1h->hr_printer();
1919       if (hr_printer->is_active()) {
1920         FreeRegionListIterator iter(&local_cleanup_list);
1921         while (iter.more_available()) {
1922           HeapRegion* hr = iter.get_next();
1923           hr_printer->cleanup(hr);
1924         }
1925       }
1926 
1927       _cleanup_list->add_ordered(&local_cleanup_list);
1928       assert(local_cleanup_list.is_empty(), "post-condition");
1929 
1930       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1931     }
1932   }
1933   size_t max_live_bytes() { return _max_live_bytes; }
1934   size_t freed_bytes() { return _freed_bytes; }
1935 };
1936 
1937 class G1ParScrubRemSetTask: public AbstractGangTask {
1938 protected:
1939   G1RemSet* _g1rs;
1940   BitMap* _region_bm;
1941   BitMap* _card_bm;
1942   HeapRegionClaimer _hrclaimer;
1943 
1944 public:
1945   G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
1946       AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) {
1947   }
1948 
1949   void work(uint worker_id) {
1950     _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer);
1951   }
1952 
1953 };
1954 
1955 void ConcurrentMark::cleanup() {
1956   // world is stopped at this checkpoint
1957   assert(SafepointSynchronize::is_at_safepoint(),
1958          "world should be stopped");
1959   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1960 
1961   // If a full collection has happened, we shouldn't do this.
1962   if (has_aborted()) {
1963     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1964     return;
1965   }
1966 
1967   g1h->verify_region_sets_optional();
1968 
1969   if (VerifyDuringGC) {
1970     HandleMark hm;  // handle scope
1971     Universe::heap()->prepare_for_verify();
1972     Universe::verify(VerifyOption_G1UsePrevMarking,
1973                      " VerifyDuringGC:(before)");
1974   }
1975   g1h->check_bitmaps("Cleanup Start");
1976 
1977   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1978   g1p->record_concurrent_mark_cleanup_start();
1979 
1980   double start = os::elapsedTime();
1981 
1982   HeapRegionRemSet::reset_for_cleanup_tasks();
1983 
1984   uint n_workers;
1985 
1986   // Do counting once more with the world stopped for good measure.
1987   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1988 
1989   g1h->set_par_threads();
1990   n_workers = g1h->n_par_threads();
1991   assert(g1h->n_par_threads() == n_workers,
1992          "Should not have been reset");
1993   g1h->workers()->run_task(&g1_par_count_task);
1994   // Done with the parallel phase so reset to 0.
1995   g1h->set_par_threads(0);
1996 
1997   if (VerifyDuringGC) {
1998     // Verify that the counting data accumulated during marking matches
1999     // that calculated by walking the marking bitmap.
2000 
2001     // Bitmaps to hold expected values
2002     BitMap expected_region_bm(_region_bm.size(), true);
2003     BitMap expected_card_bm(_card_bm.size(), true);
2004 
2005     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2006                                                  &_region_bm,
2007                                                  &_card_bm,
2008                                                  &expected_region_bm,
2009                                                  &expected_card_bm);
2010 
2011     g1h->set_par_threads((int)n_workers);
2012     g1h->workers()->run_task(&g1_par_verify_task);
2013     // Done with the parallel phase so reset to 0.
2014     g1h->set_par_threads(0);
2015 
2016     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2017   }
2018 
2019   size_t start_used_bytes = g1h->used();
2020   g1h->set_marking_complete();
2021 
2022   double count_end = os::elapsedTime();
2023   double this_final_counting_time = (count_end - start);
2024   _total_counting_time += this_final_counting_time;
2025 
2026   if (G1PrintRegionLivenessInfo) {
2027     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2028     _g1h->heap_region_iterate(&cl);
2029   }
2030 
2031   // Install newly created mark bitMap as "prev".
2032   swapMarkBitMaps();
2033 
2034   g1h->reset_gc_time_stamp();
2035 
2036   // Note end of marking in all heap regions.
2037   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
2038   g1h->set_par_threads((int)n_workers);
2039   g1h->workers()->run_task(&g1_par_note_end_task);
2040   g1h->set_par_threads(0);
2041   g1h->check_gc_time_stamps();
2042 
2043   if (!cleanup_list_is_empty()) {
2044     // The cleanup list is not empty, so we'll have to process it
2045     // concurrently. Notify anyone else that might be wanting free
2046     // regions that there will be more free regions coming soon.
2047     g1h->set_free_regions_coming();
2048   }
2049 
2050   // Scrub the rem sets before the record_concurrent_mark_cleanup_end() call
2051   // below, since scrubbing affects the metric by which we sort the heap regions.
2052   if (G1ScrubRemSets) {
2053     double rs_scrub_start = os::elapsedTime();
2054     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
2055     g1h->set_par_threads((int)n_workers);
2056     g1h->workers()->run_task(&g1_par_scrub_rs_task);
2057     g1h->set_par_threads(0);
2058 
2059     double rs_scrub_end = os::elapsedTime();
2060     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2061     _total_rs_scrub_time += this_rs_scrub_time;
2062   }
2063 
2064   // this will also free any regions totally full of garbage objects,
2065   // and sort the regions.
2066   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2067 
2068   // Statistics.
2069   double end = os::elapsedTime();
2070   _cleanup_times.add((end - start) * 1000.0);
2071 
2072   if (G1Log::fine()) {
2073     g1h->print_size_transition(gclog_or_tty,
2074                                start_used_bytes,
2075                                g1h->used(),
2076                                g1h->capacity());
2077   }
2078 
2079   // Clean up will have freed any regions completely full of garbage.
2080   // Update the soft reference policy with the new heap occupancy.
2081   Universe::update_heap_info_at_gc();
2082 
2083   if (VerifyDuringGC) {
2084     HandleMark hm;  // handle scope
2085     Universe::heap()->prepare_for_verify();
2086     Universe::verify(VerifyOption_G1UsePrevMarking,
2087                      " VerifyDuringGC:(after)");
2088   }
2089 
2090   g1h->check_bitmaps("Cleanup End");
2091 
2092   g1h->verify_region_sets_optional();
2093 
2094   // We need to make this be a "collection" so any collection pause that
2095   // races with it goes around and waits for completeCleanup to finish.
2096   g1h->increment_total_collections();
2097 
2098   // Clean out dead classes and update Metaspace sizes.
2099   if (ClassUnloadingWithConcurrentMark) {
2100     ClassLoaderDataGraph::purge();
2101   }
2102   MetaspaceGC::compute_new_size();
2103 
2104   // We reclaimed old regions so we should calculate the sizes to make
2105   // sure we update the old gen/space data.
2106   g1h->g1mm()->update_sizes();
2107 
2108   g1h->trace_heap_after_concurrent_cycle();
2109 }
2110 
2111 void ConcurrentMark::completeCleanup() {
2112   if (has_aborted()) return;
2113 
2114   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2115 
2116   _cleanup_list.verify_optional();
2117   FreeRegionList tmp_free_list("Tmp Free List");
2118 
2119   if (G1ConcRegionFreeingVerbose) {
2120     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2121                            "cleanup list has %u entries",
2122                            _cleanup_list.length());
2123   }
2124 
2125   // No one else should be accessing the _cleanup_list at this point,
2126   // so it is not necessary to take any locks
2127   while (!_cleanup_list.is_empty()) {
2128     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2129     assert(hr != NULL, "Got NULL from a non-empty list");
2130     hr->par_clear();
2131     tmp_free_list.add_ordered(hr);
2132 
2133     // Instead of adding one region at a time to the secondary_free_list,
2134     // we accumulate them in the local list and move them a few at a
2135     // time. This also cuts down on the number of notify_all() calls
2136     // we do during this process. We'll also append the local list when
2137     // _cleanup_list is empty (which means we just removed the last
2138     // region from the _cleanup_list).
2139     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2140         _cleanup_list.is_empty()) {
2141       if (G1ConcRegionFreeingVerbose) {
2142         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2143                                "appending %u entries to the secondary_free_list, "
2144                                "cleanup list still has %u entries",
2145                                tmp_free_list.length(),
2146                                _cleanup_list.length());
2147       }
2148 
2149       {
2150         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2151         g1h->secondary_free_list_add(&tmp_free_list);
2152         SecondaryFreeList_lock->notify_all();
2153       }
2154 
2155       if (G1StressConcRegionFreeing) {
2156         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2157           os::sleep(Thread::current(), (jlong) 1, false);
2158         }
2159       }
2160     }
2161   }
2162   assert(tmp_free_list.is_empty(), "post-condition");
2163 }
2164 
2165 // Supporting Object and Oop closures for reference discovery
2166 // and processing during marking
2167 
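     // An object is alive if it lies outside the G1 reserved heap (and so is
     // not covered by the marking bitmaps) or if it is not "ill", i.e. it is
     // live with respect to the marking that is currently in progress.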
2168 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2169   HeapWord* addr = (HeapWord*)obj;
2170   return addr != NULL &&
2171          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2172 }
2173 
2174 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2175 // Uses the CMTask associated with a worker thread (for serial reference
2176 // processing the CMTask for worker 0 is used) to preserve (mark) and
2177 // trace referent objects.
2178 //
2179 // Using the CMTask and embedded local queues avoids having the worker
2180 // threads operating on the global mark stack. This reduces the risk
2181 // of overflowing the stack - which we would rather avoid at this late
2182 // stage. Also, using the tasks' local queues removes the potential
2183 // for the workers to interfere with each other, which could occur if
2184 // they operated on the global stack.
2185 
2186 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2187   ConcurrentMark* _cm;
2188   CMTask*         _task;
2189   int             _ref_counter_limit;
2190   int             _ref_counter;
2191   bool            _is_serial;
2192  public:
2193   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2194     _cm(cm), _task(task), _is_serial(is_serial),
2195     _ref_counter_limit(G1RefProcDrainInterval) {
2196     assert(_ref_counter_limit > 0, "sanity");
2197     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2198     _ref_counter = _ref_counter_limit;
2199   }
2200 
2201   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2202   virtual void do_oop(      oop* p) { do_oop_work(p); }
2203 
2204   template <class T> void do_oop_work(T* p) {
2205     if (!_cm->has_overflown()) {
2206       oop obj = oopDesc::load_decode_heap_oop(p);
2207       if (_cm->verbose_high()) {
2208         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2209                                "*"PTR_FORMAT" = "PTR_FORMAT,
2210                                _task->worker_id(), p2i(p), p2i((void*) obj));
2211       }
2212 
2213       _task->deal_with_reference(obj);
2214       _ref_counter--;
2215 
2216       if (_ref_counter == 0) {
2217         // We have dealt with _ref_counter_limit references, pushing them
2218         // and objects reachable from them on to the local stack (and
2219         // possibly the global stack). Call CMTask::do_marking_step() to
2220         // process these entries.
2221         //
2222         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2223         // there's nothing more to do (i.e. we're done with the entries that
2224         // were pushed as a result of the CMTask::deal_with_reference() calls
2225         // above) or we overflow.
2226         //
2227         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2228         // flag while there may still be some work to do. (See the comment at
2229         // the beginning of CMTask::do_marking_step() for those conditions -
2230         // one of which is reaching the specified time target.) It is only
2231         // when CMTask::do_marking_step() returns without setting the
2232         // has_aborted() flag that the marking step has completed.
2233         do {
2234           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2235           _task->do_marking_step(mark_step_duration_ms,
2236                                  false      /* do_termination */,
2237                                  _is_serial);
2238         } while (_task->has_aborted() && !_cm->has_overflown());
2239         _ref_counter = _ref_counter_limit;
2240       }
2241     } else {
2242       if (_cm->verbose_high()) {
2243          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2244       }
2245     }
2246   }
2247 };
2248 
2249 // 'Drain' oop closure used by both serial and parallel reference processing.
2250 // Uses the CMTask associated with a given worker thread (for serial
2251 // reference processing the CMTask for worker 0 is used). Calls the
2252 // do_marking_step routine, with an unbelievably large timeout value,
2253 // to drain the marking data structures of the remaining entries
2254 // added by the 'keep alive' oop closure above.
2255 
2256 class G1CMDrainMarkingStackClosure: public VoidClosure {
2257   ConcurrentMark* _cm;
2258   CMTask*         _task;
2259   bool            _is_serial;
2260  public:
2261   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2262     _cm(cm), _task(task), _is_serial(is_serial) {
2263     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2264   }
2265 
2266   void do_void() {
2267     do {
2268       if (_cm->verbose_high()) {
2269         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2270                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2271       }
2272 
2273       // We call CMTask::do_marking_step() to completely drain the local
2274       // and global marking stacks of entries pushed by the 'keep alive'
2275       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2276       //
2277       // CMTask::do_marking_step() is called in a loop, which we'll exit
2278       // if there's nothing more to do (i.e. we've completely drained the
2279       // entries that were pushed as a result of applying the 'keep alive'
2280       // closure to the entries on the discovered ref lists) or we overflow
2281       // the global marking stack.
2282       //
2283       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2284       // flag while there may still be some work to do. (See the comment at
2285       // the beginning of CMTask::do_marking_step() for those conditions -
2286       // one of which is reaching the specified time target.) It is only
2287       // when CMTask::do_marking_step() returns without setting the
2288       // has_aborted() flag that the marking step has completed.
2289 
2290       _task->do_marking_step(1000000000.0 /* something very large */,
2291                              true         /* do_termination */,
2292                              _is_serial);
2293     } while (_task->has_aborted() && !_cm->has_overflown());
2294   }
2295 };
2296 
2297 // Implementation of AbstractRefProcTaskExecutor for parallel
2298 // reference processing at the end of G1 concurrent marking
2299 
2300 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2301 private:
2302   G1CollectedHeap* _g1h;
2303   ConcurrentMark*  _cm;
2304   WorkGang*        _workers;
2305   int              _active_workers;
2306 
2307 public:
2308   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2309                         ConcurrentMark* cm,
2310                         WorkGang* workers,
2311                         int n_workers) :
2312     _g1h(g1h), _cm(cm),
2313     _workers(workers), _active_workers(n_workers) { }
2314 
2315   // Executes the given task using concurrent marking worker threads.
2316   virtual void execute(ProcessTask& task);
2317   virtual void execute(EnqueueTask& task);
2318 };
2319 
2320 class G1CMRefProcTaskProxy: public AbstractGangTask {
2321   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2322   ProcessTask&     _proc_task;
2323   G1CollectedHeap* _g1h;
2324   ConcurrentMark*  _cm;
2325 
2326 public:
2327   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2328                      G1CollectedHeap* g1h,
2329                      ConcurrentMark* cm) :
2330     AbstractGangTask("Process reference objects in parallel"),
2331     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2332     ReferenceProcessor* rp = _g1h->ref_processor_cm();
2333     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2334   }
2335 
2336   virtual void work(uint worker_id) {
2337     ResourceMark rm;
2338     HandleMark hm;
2339     CMTask* task = _cm->task(worker_id);
2340     G1CMIsAliveClosure g1_is_alive(_g1h);
2341     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2342     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2343 
2344     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2345   }
2346 };
2347 
2348 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2349   assert(_workers != NULL, "Need parallel worker threads.");
2350   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2351 
2352   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2353 
2354   // We need to reset the concurrency level before each
2355   // proxy task execution, so that the termination protocol
2356   // and overflow handling in CMTask::do_marking_step() knows
2357   // how many workers to wait for.
2358   _cm->set_concurrency(_active_workers);
2359   _g1h->set_par_threads(_active_workers);
2360   _workers->run_task(&proc_task_proxy);
2361   _g1h->set_par_threads(0);
2362 }
2363 
2364 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2365   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2366   EnqueueTask& _enq_task;
2367 
2368 public:
2369   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2370     AbstractGangTask("Enqueue reference objects in parallel"),
2371     _enq_task(enq_task) { }
2372 
2373   virtual void work(uint worker_id) {
2374     _enq_task.work(worker_id);
2375   }
2376 };
2377 
2378 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2379   assert(_workers != NULL, "Need parallel worker threads.");
2380   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2381 
2382   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2383 
2384   // Not strictly necessary but...
2385   //
2386   // We need to reset the concurrency level before each
2387   // proxy task execution, so that the termination protocol
2388   // and overflow handling in CMTask::do_marking_step() knows
2389   // how many workers to wait for.
2390   _cm->set_concurrency(_active_workers);
2391   _g1h->set_par_threads(_active_workers);
2392   _workers->run_task(&enq_task_proxy);
2393   _g1h->set_par_threads(0);
2394 }
2395 
2396 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2397   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2398 }
2399 
2400 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2401   if (has_overflown()) {
2402     // Skip processing the discovered references if we have
2403     // overflown the global marking stack. Reference objects
2404     // only get discovered once so it is OK to not
2405     // de-populate the discovered reference lists. We could have,
2406     // but the only benefit would be that, when marking restarts,
2407     // less reference objects are discovered.
2408     return;
2409   }
2410 
2411   ResourceMark rm;
2412   HandleMark   hm;
2413 
2414   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2415 
2416   // Is alive closure.
2417   G1CMIsAliveClosure g1_is_alive(g1h);
2418 
2419   // Inner scope to exclude the cleaning of the string and symbol
2420   // tables from the displayed time.
2421   {
2422     G1CMTraceTime t("GC ref-proc", G1Log::finer());
2423 
2424     ReferenceProcessor* rp = g1h->ref_processor_cm();
2425 
2426     // See the comment in G1CollectedHeap::ref_processing_init()
2427     // about how reference processing currently works in G1.
2428 
2429     // Set the soft reference policy
2430     rp->setup_policy(clear_all_soft_refs);
2431     assert(_markStack.isEmpty(), "mark stack should be empty");
2432 
2433     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2434     // in serial reference processing. Note these closures are also
2435     // used for serially processing (by the current thread) the
2436     // JNI references during parallel reference processing.
2437     //
2438     // These closures do not need to synchronize with the worker
2439     // threads involved in parallel reference processing as these
2440     // instances are executed serially by the current thread (e.g.
2441     // reference processing is not multi-threaded and is thus
2442     // performed by the current thread instead of a gang worker).
2443     //
2444     // The gang tasks involved in parallel reference processing create
2445     // their own instances of these closures, which do their own
2446     // synchronization among themselves.
2447     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2448     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2449 
2450     // We need at least one active thread. If reference processing
2451     // is not multi-threaded we use the current (VMThread) thread,
2452     // otherwise we use the work gang from the G1CollectedHeap and
2453     // we utilize all the worker threads we can.
2454     bool processing_is_mt = rp->processing_is_mt();
2455     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2456     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2457 
2458     // Parallel processing task executor.
2459     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2460                                               g1h->workers(), active_workers);
2461     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2462 
2463     // Set the concurrency level. The phase was already set prior to
2464     // executing the remark task.
2465     set_concurrency(active_workers);
2466 
2467     // Set the degree of MT processing here.  If the discovery was done MT,
2468     // the number of threads involved during discovery could differ from
2469     // the number of active workers.  This is OK as long as the discovered
2470     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2471     rp->set_active_mt_degree(active_workers);
2472 
2473     // Process the weak references.
2474     const ReferenceProcessorStats& stats =
2475         rp->process_discovered_references(&g1_is_alive,
2476                                           &g1_keep_alive,
2477                                           &g1_drain_mark_stack,
2478                                           executor,
2479                                           g1h->gc_timer_cm(),
2480                                           concurrent_gc_id());
2481     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2482 
2483     // The do_oop work routines of the keep_alive and drain_marking_stack
2484     // oop closures will set the has_overflown flag if we overflow the
2485     // global marking stack.
2486 
2487     assert(_markStack.overflow() || _markStack.isEmpty(),
2488             "mark stack should be empty (unless it overflowed)");
2489 
2490     if (_markStack.overflow()) {
2491       // This should have been done already when we tried to push an
2492       // entry on to the global mark stack. But let's do it again.
2493       set_has_overflown();
2494     }
2495 
2496     assert(rp->num_q() == active_workers, "why not");
2497 
2498     rp->enqueue_discovered_references(executor);
2499 
2500     rp->verify_no_references_recorded();
2501     assert(!rp->discovery_enabled(), "Post condition");
2502   }
2503 
2504   if (has_overflown()) {
2505     // We can not trust g1_is_alive if the marking stack overflowed
2506     return;
2507   }
2508 
2509   assert(_markStack.isEmpty(), "Marking should have completed");
2510 
2511   // Unload Klasses, String, Symbols, Code Cache, etc.
2512   {
2513     G1CMTraceTime trace("Unloading", G1Log::finer());
2514 
2515     if (ClassUnloadingWithConcurrentMark) {
2516       bool purged_classes;
2517 
2518       {
2519         G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
2520         purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
2521       }
2522 
2523       {
2524         G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
2525         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2526       }
2527     }
2528 
2529     if (G1StringDedup::is_enabled()) {
2530       G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
2531       G1StringDedup::unlink(&g1_is_alive);
2532     }
2533   }
2534 }
2535 
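     // After marking completes, the "next" bitmap (which reflects the
     // just-finished marking) becomes the "prev" bitmap, and the old "prev"
     // bitmap becomes the "next" one, to be cleared before the next cycle.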
2536 void ConcurrentMark::swapMarkBitMaps() {
2537   CMBitMapRO* temp = _prevMarkBitMap;
2538   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2539   _nextMarkBitMap  = (CMBitMap*)  temp;
2540 }
2541 
2542 class CMObjectClosure;
2543 
2544 // Closure for iterating over objects, currently only used for
2545 // processing SATB buffers.
2546 class CMObjectClosure : public ObjectClosure {
2547 private:
2548   CMTask* _task;
2549 
2550 public:
2551   void do_object(oop obj) {
2552     _task->deal_with_reference(obj);
2553   }
2554 
2555   CMObjectClosure(CMTask* task) : _task(task) { }
2556 };
2557 
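     // Thread closure applied during remark: for each thread it claims, it
     // scans the nmethods on Java thread stacks and drains the thread's
     // SATB buffer into the marking task's data structures.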
2558 class G1RemarkThreadsClosure : public ThreadClosure {
2559   CMObjectClosure _cm_obj;
2560   G1CMOopClosure _cm_cl;
2561   MarkingCodeBlobClosure _code_cl;
2562   int _thread_parity;
2563 
2564  public:
2565   G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
2566     _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2567     _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}
2568 
2569   void do_thread(Thread* thread) {
2570     if (thread->is_Java_thread()) {
2571       if (thread->claim_oops_do(true, _thread_parity)) {
2572         JavaThread* jt = (JavaThread*)thread;
2573 
2574         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2575         // however, oops reachable from nmethods have very complex lifecycles:
2576         // * Alive if on the stack of an executing method
2577         // * Weakly reachable otherwise
2578         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2579         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2580         jt->nmethods_do(&_code_cl);
2581 
2582         jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
2583       }
2584     } else if (thread->is_VM_thread()) {
2585       if (thread->claim_oops_do(true, _thread_parity)) {
2586         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
2587       }
2588     }
2589   }
2590 };
2591 
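     // Gang task executed during the remark pause: each active worker first
     // processes the root threads (SATB buffers and nmethods) and then calls
     // do_marking_step() until it terminates or the mark stack overflows.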
2592 class CMRemarkTask: public AbstractGangTask {
2593 private:
2594   ConcurrentMark* _cm;
2595 public:
2596   void work(uint worker_id) {
2597     // Since all available tasks are actually started, we should
2598     // only proceed if we're supposed to be active.
2599     if (worker_id < _cm->active_tasks()) {
2600       CMTask* task = _cm->task(worker_id);
2601       task->record_start_time();
2602       {
2603         ResourceMark rm;
2604         HandleMark hm;
2605 
2606         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2607         Threads::threads_do(&threads_f);
2608       }
2609 
2610       do {
2611         task->do_marking_step(1000000000.0 /* something very large */,
2612                               true         /* do_termination       */,
2613                               false        /* is_serial            */);
2614       } while (task->has_aborted() && !_cm->has_overflown());
2615       // If we overflow, then we do not want to restart. We instead
2616       // want to abort remark and do concurrent marking again.
2617       task->record_end_time();
2618     }
2619   }
2620 
2621   CMRemarkTask(ConcurrentMark* cm, int active_workers) :
2622     AbstractGangTask("Par Remark"), _cm(cm) {
2623     _cm->terminator()->reset_for_reuse(active_workers);
2624   }
2625 };
2626 
2627 void ConcurrentMark::checkpointRootsFinalWork() {
2628   ResourceMark rm;
2629   HandleMark   hm;
2630   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2631 
2632   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2633 
2634   g1h->ensure_parsability(false);
2635 
2636   G1CollectedHeap::StrongRootsScope srs(g1h);
2637   // this is remark, so we'll use up all active threads
2638   uint active_workers = g1h->workers()->active_workers();
2639   if (active_workers == 0) {
2640     assert(active_workers > 0, "Should have been set earlier");
2641     active_workers = (uint) ParallelGCThreads;
2642     g1h->workers()->set_active_workers(active_workers);
2643   }
2644   set_concurrency_and_phase(active_workers, false /* concurrent */);
2645   // Leave _parallel_marking_threads at its
2646   // value originally calculated in the ConcurrentMark
2647   // constructor and pass values of the active workers
2648   // through the gang in the task.
2649 
2650   CMRemarkTask remarkTask(this, active_workers);
2651   // We will start all available threads, even if we decide that the
2652   // active_workers will be fewer. The extra ones will just bail out
2653   // immediately.
2654   g1h->set_par_threads(active_workers);
2655   g1h->workers()->run_task(&remarkTask);
2656   g1h->set_par_threads(0);
2657 
2658   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2659   guarantee(has_overflown() ||
2660             satb_mq_set.completed_buffers_num() == 0,
2661             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2662                     BOOL_TO_STR(has_overflown()),
2663                     satb_mq_set.completed_buffers_num()));
2664 
2665   print_stats();
2666 }
2667 
2668 #ifndef PRODUCT
2669 
2670 class PrintReachableOopClosure: public OopClosure {
2671 private:
2672   G1CollectedHeap* _g1h;
2673   outputStream*    _out;
2674   VerifyOption     _vo;
2675   bool             _all;
2676 
2677 public:
2678   PrintReachableOopClosure(outputStream* out,
2679                            VerifyOption  vo,
2680                            bool          all) :
2681     _g1h(G1CollectedHeap::heap()),
2682     _out(out), _vo(vo), _all(all) { }
2683 
2684   void do_oop(narrowOop* p) { do_oop_work(p); }
2685   void do_oop(      oop* p) { do_oop_work(p); }
2686 
2687   template <class T> void do_oop_work(T* p) {
2688     oop         obj = oopDesc::load_decode_heap_oop(p);
2689     const char* str = NULL;
2690     const char* str2 = "";
2691 
2692     if (obj == NULL) {
2693       str = "";
2694     } else if (!_g1h->is_in_g1_reserved(obj)) {
2695       str = " O";
2696     } else {
2697       HeapRegion* hr  = _g1h->heap_region_containing(obj);
2698       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2699       bool marked = _g1h->is_marked(obj, _vo);
2700 
2701       if (over_tams) {
2702         str = " >";
2703         if (marked) {
2704           str2 = " AND MARKED";
2705         }
2706       } else if (marked) {
2707         str = " M";
2708       } else {
2709         str = " NOT";
2710       }
2711     }
2712 
2713     _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
2714                    p2i(p), p2i((void*) obj), str, str2);
2715   }
2716 };
2717 
2718 class PrintReachableObjectClosure : public ObjectClosure {
2719 private:
2720   G1CollectedHeap* _g1h;
2721   outputStream*    _out;
2722   VerifyOption     _vo;
2723   bool             _all;
2724   HeapRegion*      _hr;
2725 
2726 public:
2727   PrintReachableObjectClosure(outputStream* out,
2728                               VerifyOption  vo,
2729                               bool          all,
2730                               HeapRegion*   hr) :
2731     _g1h(G1CollectedHeap::heap()),
2732     _out(out), _vo(vo), _all(all), _hr(hr) { }
2733 
2734   void do_object(oop o) {
2735     bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2736     bool marked = _g1h->is_marked(o, _vo);
2737     bool print_it = _all || over_tams || marked;
2738 
2739     if (print_it) {
2740       _out->print_cr(" "PTR_FORMAT"%s",
2741                      p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : "");
2742       PrintReachableOopClosure oopCl(_out, _vo, _all);
2743       o->oop_iterate_no_header(&oopCl);
2744     }
2745   }
2746 };
2747 
2748 class PrintReachableRegionClosure : public HeapRegionClosure {
2749 private:
2750   G1CollectedHeap* _g1h;
2751   outputStream*    _out;
2752   VerifyOption     _vo;
2753   bool             _all;
2754 
2755 public:
2756   bool doHeapRegion(HeapRegion* hr) {
2757     HeapWord* b = hr->bottom();
2758     HeapWord* e = hr->end();
2759     HeapWord* t = hr->top();
2760     HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2761     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2762                    "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p));
2763     _out->cr();
2764 
2765     HeapWord* from = b;
2766     HeapWord* to   = t;
2767 
2768     if (to > from) {
2769       _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to));
2770       _out->cr();
2771       PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2772       hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2773       _out->cr();
2774     }
2775 
2776     return false;
2777   }
2778 
2779   PrintReachableRegionClosure(outputStream* out,
2780                               VerifyOption  vo,
2781                               bool          all) :
2782     _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2783 };
2784 
2785 void ConcurrentMark::print_reachable(const char* str,
2786                                      VerifyOption vo,
2787                                      bool all) {
2788   gclog_or_tty->cr();
2789   gclog_or_tty->print_cr("== Doing heap dump... ");
2790 
2791   if (G1PrintReachableBaseFile == NULL) {
2792     gclog_or_tty->print_cr("  #### error: no base file defined");
2793     return;
2794   }
2795 
2796   if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2797       (JVM_MAXPATHLEN - 1)) {
2798     gclog_or_tty->print_cr("  #### error: file name too long");
2799     return;
2800   }
2801 
2802   char file_name[JVM_MAXPATHLEN];
2803   sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2804   gclog_or_tty->print_cr("  dumping to file %s", file_name);
2805 
2806   fileStream fout(file_name);
2807   if (!fout.is_open()) {
2808     gclog_or_tty->print_cr("  #### error: could not open file");
2809     return;
2810   }
2811 
2812   outputStream* out = &fout;
2813   out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2814   out->cr();
2815 
2816   out->print_cr("--- ITERATING OVER REGIONS");
2817   out->cr();
2818   PrintReachableRegionClosure rcl(out, vo, all);
2819   _g1h->heap_region_iterate(&rcl);
2820   out->cr();
2821 
2822   gclog_or_tty->print_cr("  done");
2823   gclog_or_tty->flush();
2824 }
2825 
2826 #endif // PRODUCT
2827 
2828 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2829   // Note we are overriding the read-only view of the prev map here, via
2830   // the cast.
2831   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2832 }
2833 
2834 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2835   _nextMarkBitMap->clearRange(mr);
2836 }
2837 
2838 HeapRegion*
2839 ConcurrentMark::claim_region(uint worker_id) {
2840   // "checkpoint" the finger
2841   HeapWord* finger = _finger;
2842 
2843   // _heap_end will not change underneath our feet; it only changes at
2844   // yield points.
2845   while (finger < _heap_end) {
2846     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2847 
2848     // Note on how this code handles humongous regions. In the
2849     // normal case the finger will reach the start of a "starts
2850     // humongous" (SH) region. Its end will either be the end of the
2851     // last "continues humongous" (CH) region in the sequence, or the
2852     // standard end of the SH region (if the SH is the only region in
2853     // the sequence). That way claim_region() will skip over the CH
2854     // regions. However, there is a subtle race between a CM thread
2855     // executing this method and a mutator thread doing a humongous
2856     // object allocation. The two are not mutually exclusive as the CM
2857     // thread does not need to hold the Heap_lock when it gets
2858     // here. So there is a chance that claim_region() will come across
2859     // a free region that's in the process of becoming a SH or a CH
2860     // region. In the former case, it will either
2861     //   a) Miss the update to the region's end, in which case it will
2862     //      visit every subsequent CH region, will find their bitmaps
2863     //      empty, and do nothing, or
2864     //   b) Will observe the update of the region's end (in which case
2865     //      it will skip the subsequent CH regions).
2866     // If it comes across a region that suddenly becomes CH, the
2867     // scenario will be similar to b). So, the race between
2868     // claim_region() and a humongous object allocation might force us
2869     // to do a bit of unnecessary work (due to some unnecessary bitmap
2870     // iterations) but it should not introduce any correctness issues.
2871     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2872 
2873     // heap_region_containing_raw() above may return NULL, as we always scan/claim
2874     // up to the end of the heap. In this case, just jump to the next region.
2875     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2876 
2877     // Is the gap between reading the finger and doing the CAS too long?
2878     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
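         // Note: Atomic::cmpxchg_ptr() returns the value that was previously in
         // _finger. If that equals the finger we read above, the CAS installed
         // 'end' and this worker has claimed [finger, end); otherwise another
         // worker raced ahead and we re-read the finger below.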
2879     if (res == finger && curr_region != NULL) {
2880       // we succeeded
2881       HeapWord*   bottom        = curr_region->bottom();
2882       HeapWord*   limit         = curr_region->next_top_at_mark_start();
2883 
2884       if (verbose_low()) {
2885         gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2886                                "["PTR_FORMAT", "PTR_FORMAT"), "
2887                                "limit = "PTR_FORMAT,
2888                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2889       }
2890 
2891       // Notice that _finger == end cannot be guaranteed here since
2892       // someone else might have moved the finger even further.
2893       assert(_finger >= end, "the finger should have moved forward");
2894 
2895       if (verbose_low()) {
2896         gclog_or_tty->print_cr("[%u] we were successful with region = "
2897                                PTR_FORMAT, worker_id, p2i(curr_region));
2898       }
2899 
2900       if (limit > bottom) {
2901         if (verbose_low()) {
2902           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2903                                  "returning it ", worker_id, p2i(curr_region));
2904         }
2905         return curr_region;
2906       } else {
2907         assert(limit == bottom,
2908                "the region limit should be at bottom");
2909         if (verbose_low()) {
2910           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2911                                  "returning NULL", worker_id, p2i(curr_region));
2912         }
2913         // we return NULL and the caller should try calling
2914         // claim_region() again.
2915         return NULL;
2916       }
2917     } else {
2918       assert(_finger > finger, "the finger should have moved forward");
2919       if (verbose_low()) {
2920         if (curr_region == NULL) {
2921           gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
2922                                  "global finger = "PTR_FORMAT", "
2923                                  "our finger = "PTR_FORMAT,
2924                                  worker_id, p2i(_finger), p2i(finger));
2925         } else {
2926           gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2927                                  "global finger = "PTR_FORMAT", "
2928                                  "our finger = "PTR_FORMAT,
2929                                  worker_id, p2i(_finger), p2i(finger));
2930         }
2931       }
2932 
2933       // read it again
2934       finger = _finger;
2935     }
2936   }
2937 
2938   return NULL;
2939 }
2940 
2941 #ifndef PRODUCT
2942 enum VerifyNoCSetOopsPhase {
2943   VerifyNoCSetOopsStack,
2944   VerifyNoCSetOopsQueues,
2945   VerifyNoCSetOopsSATBCompleted,
2946   VerifyNoCSetOopsSATBThread
2947 };
2948 
2949 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
2950 private:
2951   G1CollectedHeap* _g1h;
2952   VerifyNoCSetOopsPhase _phase;
2953   int _info;
2954 
2955   const char* phase_str() {
2956     switch (_phase) {
2957     case VerifyNoCSetOopsStack:         return "Stack";
2958     case VerifyNoCSetOopsQueues:        return "Queue";
2959     case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
2960     case VerifyNoCSetOopsSATBThread:    return "Thread SATB Buffers";
2961     default:                            ShouldNotReachHere();
2962     }
2963     return NULL;
2964   }
2965 
2966   void do_object_work(oop obj) {
2967     guarantee(!_g1h->obj_in_cs(obj),
2968               err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
2969                       p2i((void*) obj), phase_str(), _info));
2970   }
2971 
2972 public:
2973   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2974 
2975   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
2976     _phase = phase;
2977     _info = info;
2978   }
2979 
2980   virtual void do_oop(oop* p) {
2981     oop obj = oopDesc::load_decode_heap_oop(p);
2982     do_object_work(obj);
2983   }
2984 
2985   virtual void do_oop(narrowOop* p) {
2986     // We should not come across narrow oops while scanning marking
2987     // stacks and SATB buffers.
2988     ShouldNotReachHere();
2989   }
2990 
2991   virtual void do_object(oop obj) {
2992     do_object_work(obj);
2993   }
2994 };
2995 
2996 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
2997                                          bool verify_enqueued_buffers,
2998                                          bool verify_thread_buffers,
2999                                          bool verify_fingers) {
3000   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
3001   if (!G1CollectedHeap::heap()->mark_in_progress()) {
3002     return;
3003   }
3004 
3005   VerifyNoCSetOopsClosure cl;
3006 
3007   if (verify_stacks) {
3008     // Verify entries on the global mark stack
3009     cl.set_phase(VerifyNoCSetOopsStack);
3010     _markStack.oops_do(&cl);
3011 
3012     // Verify entries on the task queues
3013     for (uint i = 0; i < _max_worker_id; i += 1) {
3014       cl.set_phase(VerifyNoCSetOopsQueues, i);
3015       CMTaskQueue* queue = _task_queues->queue(i);
3016       queue->oops_do(&cl);
3017     }
3018   }
3019 
3020   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
3021 
3022   // Verify entries on the enqueued SATB buffers
3023   if (verify_enqueued_buffers) {
3024     cl.set_phase(VerifyNoCSetOopsSATBCompleted);
3025     satb_qs.iterate_completed_buffers_read_only(&cl);
3026   }
3027 
3028   // Verify entries on the per-thread SATB buffers
3029   if (verify_thread_buffers) {
3030     cl.set_phase(VerifyNoCSetOopsSATBThread);
3031     satb_qs.iterate_thread_buffers_read_only(&cl);
3032   }
3033 
3034   if (verify_fingers) {
3035     // Verify the global finger
3036     HeapWord* global_finger = finger();
3037     if (global_finger != NULL && global_finger < _heap_end) {
3038       // The global finger always points to a heap region boundary. We
3039       // use heap_region_containing_raw() to get the containing region
3040       // given that the global finger could be pointing to a free region
3041       // which subsequently becomes continues humongous. If that
3042       // happens, heap_region_containing() will return the bottom of the
3043       // corresponding starts humongous region and the check below will
3044       // not hold any more.
3045       // Since we always iterate over all regions, we might get a NULL HeapRegion
3046       // here.
3047       HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
3048       guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
3049                 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
3050                         p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
3051     }
3052 
3053     // Verify the task fingers
3054     assert(parallel_marking_threads() <= _max_worker_id, "sanity");
3055     for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
3056       CMTask* task = _tasks[i];
3057       HeapWord* task_finger = task->finger();
3058       if (task_finger != NULL && task_finger < _heap_end) {
3059         // See above note on the global finger verification.
3060         HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3061         guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
3062                   !task_hr->in_collection_set(),
3063                   err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
3064                           p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
3065       }
3066     }
3067   }
3068 }
3069 #endif // PRODUCT
3070 
3071 // Aggregate the counting data that was constructed concurrently
3072 // with marking.
3073 class AggregateCountDataHRClosure: public HeapRegionClosure {
3074   G1CollectedHeap* _g1h;
3075   ConcurrentMark* _cm;
3076   CardTableModRefBS* _ct_bs;
3077   BitMap* _cm_card_bm;
3078   uint _max_worker_id;
3079 
3080  public:
3081   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3082                               BitMap* cm_card_bm,
3083                               uint max_worker_id) :
3084     _g1h(g1h), _cm(g1h->concurrent_mark()),
3085     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3086     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3087 
3088   bool doHeapRegion(HeapRegion* hr) {
3089     if (hr->is_continues_humongous()) {
3090       // We will ignore these here and process them when their
3091       // associated "starts humongous" region is processed.
3092       // Note that we cannot rely on their associated
3093       // "starts humongous" region to have their bit set to 1
3094       // since, due to the region chunking in the parallel region
3095       // iteration, a "continues humongous" region might be visited
3096       // before its associated "starts humongous".
3097       return false;
3098     }
3099 
3100     HeapWord* start = hr->bottom();
3101     HeapWord* limit = hr->next_top_at_mark_start();
3102     HeapWord* end = hr->end();
3103 
3104     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3105            err_msg("Preconditions not met - "
3106                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3107                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
3108                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
3109 
3110     assert(hr->next_marked_bytes() == 0, "Precondition");
3111 
3112     if (start == limit) {
3113       // NTAMS of this region has not been set so nothing to do.
3114       return false;
3115     }
3116 
3117     // 'start' should be in the heap.
3118     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3119     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3120     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3121 
3122     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3123     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3124     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3125 
3126     // If ntams is not card aligned then we bump the card bitmap index
3127     // for limit so that we get all the cards spanned by
3128     // the object ending at ntams.
3129     // Note: if this is the last region in the heap then ntams
3130     // could actually be just beyond the end of the heap;
3131     // limit_idx will then correspond to a (non-existent) card
3132     // that is also outside the heap.
3133     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3134       limit_idx += 1;
3135     }
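         // For example (assuming the usual 512-byte card size), if ntams falls in
         // the middle of a card, the object straddling ntams also spans that last
         // partial card, so limit_idx is advanced by one to cover it.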
3136 
3137     assert(limit_idx <= end_idx, "or else use atomics");
3138 
3139     // Aggregate the "stripe" in the count data associated with hr.
3140     uint hrm_index = hr->hrm_index();
3141     size_t marked_bytes = 0;
3142 
3143     for (uint i = 0; i < _max_worker_id; i += 1) {
3144       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3145       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3146 
3147       // Fetch the marked_bytes in this region for task i and
3148       // add it to the running total for this region.
3149       marked_bytes += marked_bytes_array[hrm_index];
3150 
3151       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3152       // into the global card bitmap.
3153       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3154 
3155       while (scan_idx < limit_idx) {
3156         assert(task_card_bm->at(scan_idx) == true, "should be");
3157         _cm_card_bm->set_bit(scan_idx);
3158         assert(_cm_card_bm->at(scan_idx) == true, "should be");
3159 
3160         // BitMap::get_next_one_offset() can handle the case when
3161         // its left_offset parameter is greater than its right_offset
3162         // parameter. It does, however, have an early exit if
3163         // left_offset == right_offset. So let's limit the value
3164         // passed in for left offset here.
3165         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3166         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3167       }
3168     }
3169 
3170     // Update the marked bytes for this region.
3171     hr->add_to_marked_bytes(marked_bytes);
3172 
3173     // Next heap region
3174     return false;
3175   }
3176 };
3177 
3178 class G1AggregateCountDataTask: public AbstractGangTask {
3179 protected:
3180   G1CollectedHeap* _g1h;
3181   ConcurrentMark* _cm;
3182   BitMap* _cm_card_bm;
3183   uint _max_worker_id;
3184   int _active_workers;
3185   HeapRegionClaimer _hrclaimer;
3186 
3187 public:
3188   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3189                            ConcurrentMark* cm,
3190                            BitMap* cm_card_bm,
3191                            uint max_worker_id,
3192                            int n_workers) :
3193       AbstractGangTask("Count Aggregation"),
3194       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3195       _max_worker_id(max_worker_id),
3196       _active_workers(n_workers),
3197       _hrclaimer(_active_workers) {
3198   }
3199 
3200   void work(uint worker_id) {
3201     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3202 
3203     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
3204   }
3205 };
3206 
3207 
3208 void ConcurrentMark::aggregate_count_data() {
3209   int n_workers = _g1h->workers()->active_workers();
3210 
3211   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3212                                            _max_worker_id, n_workers);
3213 
3214   _g1h->set_par_threads(n_workers);
3215   _g1h->workers()->run_task(&g1_par_agg_task);
3216   _g1h->set_par_threads(0);
3217   _g1h->allocation_context_stats().update_at_remark();
3218 }
3219 
3220 // Clear the per-worker arrays used to store the per-region counting data
3221 void ConcurrentMark::clear_all_count_data() {
3222   // Clear the global card bitmap - it will be filled during
3223   // liveness count aggregation (during remark) and the
3224   // final counting task.
3225   _card_bm.clear();
3226 
3227   // Clear the global region bitmap - it will be filled as part
3228   // of the final counting task.
3229   _region_bm.clear();
3230 
3231   uint max_regions = _g1h->max_regions();
3232   assert(_max_worker_id > 0, "uninitialized");
3233 
3234   for (uint i = 0; i < _max_worker_id; i += 1) {
3235     BitMap* task_card_bm = count_card_bitmap_for(i);
3236     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3237 
3238     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3239     assert(marked_bytes_array != NULL, "uninitialized");
3240 
3241     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3242     task_card_bm->clear();
3243   }
3244 }
3245 
3246 void ConcurrentMark::print_stats() {
3247   if (verbose_stats()) {
3248     gclog_or_tty->print_cr("---------------------------------------------------------------------");
3249     for (size_t i = 0; i < _active_tasks; ++i) {
3250       _tasks[i]->print_stats();
3251       gclog_or_tty->print_cr("---------------------------------------------------------------------");
3252     }
3253   }
3254 }
3255 
3256 // abandon current marking iteration due to a Full GC
3257 void ConcurrentMark::abort() {
3258   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
3259   // concurrent bitmap clearing.
3260   _nextMarkBitMap->clearAll();
3261 
3262   // Note we cannot clear the previous marking bitmap here
3263   // since VerifyDuringGC verifies the objects marked during
3264   // a full GC against the previous bitmap.
3265 
3266   // Clear the liveness counting data
3267   clear_all_count_data();
3268   // Empty mark stack
3269   reset_marking_state();
3270   for (uint i = 0; i < _max_worker_id; ++i) {
3271     _tasks[i]->clear_region_fields();
3272   }
3273   _first_overflow_barrier_sync.abort();
3274   _second_overflow_barrier_sync.abort();
3275   const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
3276   if (!gc_id.is_undefined()) {
3277     // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
3278     // to detect that it was aborted. Only keep track of the first GC id that we aborted.
3279     _aborted_gc_id = gc_id;
3280   }
3281   _has_aborted = true;
3282 
3283   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3284   satb_mq_set.abandon_partial_marking();
3285   // This can be called either during or outside marking, we'll read
3286   // the expected_active value from the SATB queue set.
3287   satb_mq_set.set_active_all_threads(
3288                                  false, /* new active value */
3289                                  satb_mq_set.is_active() /* expected_active */);
3290 
3291   _g1h->trace_heap_after_concurrent_cycle();
3292   _g1h->register_concurrent_cycle_end();
3293 }
3294 
3295 const GCId& ConcurrentMark::concurrent_gc_id() {
3296   if (has_aborted()) {
3297     return _aborted_gc_id;
3298   }
3299   return _g1h->gc_tracer_cm()->gc_id();
3300 }
3301 
3302 static void print_ms_time_info(const char* prefix, const char* name,
3303                                NumberSeq& ns) {
3304   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3305                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3306   if (ns.num() > 0) {
3307     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3308                            prefix, ns.sd(), ns.maximum());
3309   }
3310 }
3311 
3312 void ConcurrentMark::print_summary_info() {
3313   gclog_or_tty->print_cr(" Concurrent marking:");
3314   print_ms_time_info("  ", "init marks", _init_times);
3315   print_ms_time_info("  ", "remarks", _remark_times);
3316   {
3317     print_ms_time_info("     ", "final marks", _remark_mark_times);
3318     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3319 
3320   }
3321   print_ms_time_info("  ", "cleanups", _cleanup_times);
3322   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
3323                          _total_counting_time,
3324                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3325                           (double)_cleanup_times.num()
3326                          : 0.0));
3327   if (G1ScrubRemSets) {
3328     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3329                            _total_rs_scrub_time,
3330                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3331                             (double)_cleanup_times.num()
3332                            : 0.0));
3333   }
3334   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3335                          (_init_times.sum() + _remark_times.sum() +
3336                           _cleanup_times.sum())/1000.0);
3337   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3338                 "(%8.2f s marking).",
3339                 cmThread()->vtime_accum(),
3340                 cmThread()->vtime_mark_accum());
3341 }
3342 
3343 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3344   if (use_parallel_marking_threads()) {
3345     _parallel_workers->print_worker_threads_on(st);
3346   }
3347 }
3348 
3349 void ConcurrentMark::print_on_error(outputStream* st) const {
3350   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3351       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3352   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3353   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3354 }
3355 
3356 // We take a break if someone is trying to stop the world.
3357 bool ConcurrentMark::do_yield_check(uint worker_id) {
3358   if (SuspendibleThreadSet::should_yield()) {
3359     if (worker_id == 0) {
3360       _g1h->g1_policy()->record_concurrent_pause();
3361     }
3362     SuspendibleThreadSet::yield();
3363     return true;
3364   } else {
3365     return false;
3366   }
3367 }
3368 
3369 #ifndef PRODUCT
3370 // for debugging purposes
3371 void ConcurrentMark::print_finger() {
3372   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3373                          p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3374   for (uint i = 0; i < _max_worker_id; ++i) {
3375     gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3376   }
3377   gclog_or_tty->cr();
3378 }
3379 #endif
3380 
3381 void CMTask::scan_object(oop obj) {
3382   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3383 
3384   if (_cm->verbose_high()) {
3385     gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3386                            _worker_id, p2i((void*) obj));
3387   }
3388 
3389   size_t obj_size = obj->size();
3390   _words_scanned += obj_size;
3391 
3392   obj->oop_iterate(_cm_oop_closure);
3393   statsOnly( ++_objs_scanned );
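       // check_limits() calls reached_limit() (and hence regular_clock_call())
       // once either the words-scanned or the refs-reached limit has been hit.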
3394   check_limits();
3395 }
3396 
3397 // Closure for iteration over bitmaps
3398 class CMBitMapClosure : public BitMapClosure {
3399 private:
3400   // the bitmap that is being iterated over
3401   CMBitMap*                   _nextMarkBitMap;
3402   ConcurrentMark*             _cm;
3403   CMTask*                     _task;
3404 
3405 public:
3406   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3407     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
3408 
3409   bool do_bit(size_t offset) {
3410     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3411     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3412     assert(addr < _cm->finger(), "invariant");
3413 
3414     statsOnly( _task->increase_objs_found_on_bitmap() );
3415     assert(addr >= _task->finger(), "invariant");
3416 
3417     // We move that task's local finger along.
3418     _task->move_finger_to(addr);
3419 
3420     _task->scan_object(oop(addr));
3421     // we only partially drain the local queue and global stack
3422     _task->drain_local_queue(true);
3423     _task->drain_global_stack(true);
3424 
3425     // if the has_aborted flag has been raised, we need to bail out of
3426     // the iteration
3427     return !_task->has_aborted();
3428   }
3429 };
3430 
3431 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3432                                ConcurrentMark* cm,
3433                                CMTask* task)
3434   : _g1h(g1h), _cm(cm), _task(task) {
3435   assert(_ref_processor == NULL, "should be initialized to NULL");
3436 
3437   if (G1UseConcMarkReferenceProcessing) {
3438     _ref_processor = g1h->ref_processor_cm();
3439     assert(_ref_processor != NULL, "should not be NULL");
3440   }
3441 }
3442 
3443 void CMTask::setup_for_region(HeapRegion* hr) {
3444   assert(hr != NULL,
3445         "claim_region() should have filtered out NULL regions");
3446   assert(!hr->is_continues_humongous(),
3447         "claim_region() should have filtered out continues humongous regions");
3448 
3449   if (_cm->verbose_low()) {
3450     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3451                            _worker_id, p2i(hr));
3452   }
3453 
3454   _curr_region  = hr;
3455   _finger       = hr->bottom();
3456   update_region_limit();
3457 }
3458 
3459 void CMTask::update_region_limit() {
3460   HeapRegion* hr            = _curr_region;
3461   HeapWord* bottom          = hr->bottom();
3462   HeapWord* limit           = hr->next_top_at_mark_start();
3463 
3464   if (limit == bottom) {
3465     if (_cm->verbose_low()) {
3466       gclog_or_tty->print_cr("[%u] found an empty region "
3467                              "["PTR_FORMAT", "PTR_FORMAT")",
3468                              _worker_id, p2i(bottom), p2i(limit));
3469     }
3470     // The region was collected underneath our feet.
3471     // We set the finger to bottom to ensure that the bitmap
3472     // iteration that will follow this will not do anything.
3473     // (this is not a condition that holds when we set the region up,
3474     // as the region is not supposed to be empty in the first place)
3475     _finger = bottom;
3476   } else if (limit >= _region_limit) {
3477     assert(limit >= _finger, "peace of mind");
3478   } else {
3479     assert(limit < _region_limit, "only way to get here");
3480     // This can happen under some pretty unusual circumstances.  An
3481     // evacuation pause empties the region underneath our feet (NTAMS
3482     // at bottom). We then do some allocation in the region (NTAMS
3483     // stays at bottom), followed by the region being used as a GC
3484     // alloc region (NTAMS will move to top() and the objects
3485     // originally below it will be grayed). All objects now marked in
3486     // the region are explicitly grayed, if below the global finger,
3487     // and we do not need in fact to scan anything else. So, we simply
3488     // set _finger to be limit to ensure that the bitmap iteration
3489     // doesn't do anything.
3490     _finger = limit;
3491   }
3492 
3493   _region_limit = limit;
3494 }
3495 
3496 void CMTask::giveup_current_region() {
3497   assert(_curr_region != NULL, "invariant");
3498   if (_cm->verbose_low()) {
3499     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3500                            _worker_id, p2i(_curr_region));
3501   }
3502   clear_region_fields();
3503 }
3504 
3505 void CMTask::clear_region_fields() {
3506   // Values for these three fields that indicate that we're not
3507   // holding on to a region.
3508   _curr_region   = NULL;
3509   _finger        = NULL;
3510   _region_limit  = NULL;
3511 }
3512 
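     // Install or clear the concurrent-mark oop closure for this task. The
     // asserts below document the expected pairing: a closure is only installed
     // when none is currently set, and only cleared when one is.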
3513 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3514   if (cm_oop_closure == NULL) {
3515     assert(_cm_oop_closure != NULL, "invariant");
3516   } else {
3517     assert(_cm_oop_closure == NULL, "invariant");
3518   }
3519   _cm_oop_closure = cm_oop_closure;
3520 }
3521 
3522 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3523   guarantee(nextMarkBitMap != NULL, "invariant");
3524 
3525   if (_cm->verbose_low()) {
3526     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3527   }
3528 
3529   _nextMarkBitMap                = nextMarkBitMap;
3530   clear_region_fields();
3531 
3532   _calls                         = 0;
3533   _elapsed_time_ms               = 0.0;
3534   _termination_time_ms           = 0.0;
3535   _termination_start_time_ms     = 0.0;
3536 
3537 #if _MARKING_STATS_
3538   _local_pushes                  = 0;
3539   _local_pops                    = 0;
3540   _local_max_size                = 0;
3541   _objs_scanned                  = 0;
3542   _global_pushes                 = 0;
3543   _global_pops                   = 0;
3544   _global_max_size               = 0;
3545   _global_transfers_to           = 0;
3546   _global_transfers_from         = 0;
3547   _regions_claimed               = 0;
3548   _objs_found_on_bitmap          = 0;
3549   _satb_buffers_processed        = 0;
3550   _steal_attempts                = 0;
3551   _steals                        = 0;
3552   _aborted                       = 0;
3553   _aborted_overflow              = 0;
3554   _aborted_cm_aborted            = 0;
3555   _aborted_yield                 = 0;
3556   _aborted_timed_out             = 0;
3557   _aborted_satb                  = 0;
3558   _aborted_termination           = 0;
3559 #endif // _MARKING_STATS_
3560 }
3561 
3562 bool CMTask::should_exit_termination() {
3563   regular_clock_call();
3564   // This is called when we are in the termination protocol. We should
3565   // quit if, for some reason, this task wants to abort or the global
3566   // stack is not empty (this means that we can get work from it).
3567   return !_cm->mark_stack_empty() || has_aborted();
3568 }
3569 
3570 void CMTask::reached_limit() {
3571   assert(_words_scanned >= _words_scanned_limit ||
3572          _refs_reached >= _refs_reached_limit,
3573          "shouldn't have been called otherwise");
3574   regular_clock_call();
3575 }
3576 
3577 void CMTask::regular_clock_call() {
3578   if (has_aborted()) return;
3579 
3580   // First, we need to recalculate the words scanned and refs reached
3581   // limits for the next clock call.
3582   recalculate_limits();
3583 
3584   // During the regular clock call we do the following
3585 
3586   // (1) If an overflow has been flagged, then we abort.
3587   if (_cm->has_overflown()) {
3588     set_has_aborted();
3589     return;
3590   }
3591 
3592   // If we are not concurrent (i.e. we're doing remark) we don't need
3593   // to check anything else. The other steps are only needed during
3594   // the concurrent marking phase.
3595   if (!concurrent()) return;
3596 
3597   // (2) If marking has been aborted for Full GC, then we also abort.
3598   if (_cm->has_aborted()) {
3599     set_has_aborted();
3600     statsOnly( ++_aborted_cm_aborted );
3601     return;
3602   }
3603 
3604   double curr_time_ms = os::elapsedVTime() * 1000.0;
3605 
3606   // (3) If marking stats are enabled, then we update the step history.
3607 #if _MARKING_STATS_
3608   if (_words_scanned >= _words_scanned_limit) {
3609     ++_clock_due_to_scanning;
3610   }
3611   if (_refs_reached >= _refs_reached_limit) {
3612     ++_clock_due_to_marking;
3613   }
3614 
3615   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3616   _interval_start_time_ms = curr_time_ms;
3617   _all_clock_intervals_ms.add(last_interval_ms);
3618 
3619   if (_cm->verbose_medium()) {
3620       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3621                         "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3622                         _worker_id, last_interval_ms,
3623                         _words_scanned,
3624                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3625                         _refs_reached,
3626                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3627   }
3628 #endif // _MARKING_STATS_
3629 
3630   // (4) We check whether we should yield. If we have to, then we abort.
3631   if (SuspendibleThreadSet::should_yield()) {
3632     // We should yield. To do this we abort the task. The caller is
3633     // responsible for yielding.
3634     set_has_aborted();
3635     statsOnly( ++_aborted_yield );
3636     return;
3637   }
3638 
3639   // (5) We check whether we've reached our time quota. If we have,
3640   // then we abort.
3641   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3642   if (elapsed_time_ms > _time_target_ms) {
3643     set_has_aborted();
3644     _has_timed_out = true;
3645     statsOnly( ++_aborted_timed_out );
3646     return;
3647   }
3648 
3649   // (6) Finally, we check whether there are enough completed SATB
3650   // buffers available for processing. If there are, we abort.
3651   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3652   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3653     if (_cm->verbose_low()) {
3654       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3655                              _worker_id);
3656     }
3657     // we do need to process SATB buffers, we'll abort and restart
3658     // the marking task to do so
3659     set_has_aborted();
3660     statsOnly( ++_aborted_satb );
3661     return;
3662   }
3663 }
3664 
3665 void CMTask::recalculate_limits() {
3666   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3667   _words_scanned_limit      = _real_words_scanned_limit;
3668 
3669   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
3670   _refs_reached_limit       = _real_refs_reached_limit;
3671 }
3672 
3673 void CMTask::decrease_limits() {
3674   // This is called when we believe that we're going to do an infrequent
3675   // operation which will increase the per byte scanned cost (i.e. move
3676   // entries to/from the global stack). It basically tries to decrease the
3677   // scanning limit so that the clock is called earlier.
3678 
3679   if (_cm->verbose_medium()) {
3680     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3681   }
3682 
3683   _words_scanned_limit = _real_words_scanned_limit -
3684     3 * words_scanned_period / 4;
3685   _refs_reached_limit  = _real_refs_reached_limit -
3686     3 * refs_reached_period / 4;
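       // Net effect: the "real" limits were set one full period beyond the counts
       // at the last recalculation, so pulling them back by 3/4 of a period leaves
       // roughly a quarter of a period of work before the clock fires.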
3687 }
3688 
3689 void CMTask::move_entries_to_global_stack() {
3690   // local array where we'll store the entries that will be popped
3691   // from the local queue
3692   oop buffer[global_stack_transfer_size];
3693 
3694   int n = 0;
3695   oop obj;
3696   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3697     buffer[n] = obj;
3698     ++n;
3699   }
3700 
3701   if (n > 0) {
3702     // we popped at least one entry from the local queue
3703 
3704     statsOnly( ++_global_transfers_to; _local_pops += n );
3705 
3706     if (!_cm->mark_stack_push(buffer, n)) {
3707       if (_cm->verbose_low()) {
3708         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3709                                _worker_id);
3710       }
3711       set_has_aborted();
3712     } else {
3713       // the transfer was successful
3714 
3715       if (_cm->verbose_medium()) {
3716         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3717                                _worker_id, n);
3718       }
3719       statsOnly( int tmp_size = _cm->mark_stack_size();
3720                  if (tmp_size > _global_max_size) {
3721                    _global_max_size = tmp_size;
3722                  }
3723                  _global_pushes += n );
3724     }
3725   }
3726 
3727   // this operation was quite expensive, so decrease the limits
3728   decrease_limits();
3729 }
3730 
3731 void CMTask::get_entries_from_global_stack() {
3732   // local array where we'll store the entries that will be popped
3733   // from the global stack.
3734   oop buffer[global_stack_transfer_size];
3735   int n;
3736   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3737   assert(n <= global_stack_transfer_size,
3738          "we should not pop more than the given limit");
3739   if (n > 0) {
3740     // yes, we did actually pop at least one entry
3741 
3742     statsOnly( ++_global_transfers_from; _global_pops += n );
3743     if (_cm->verbose_medium()) {
3744       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3745                              _worker_id, n);
3746     }
3747     for (int i = 0; i < n; ++i) {
3748       bool success = _task_queue->push(buffer[i]);
3749       // We only call this when the local queue is empty or under a
3750       // given target limit. So, we do not expect this push to fail.
3751       assert(success, "invariant");
3752     }
3753 
3754     statsOnly( int tmp_size = _task_queue->size();
3755                if (tmp_size > _local_max_size) {
3756                  _local_max_size = tmp_size;
3757                }
3758                _local_pushes += n );
3759   }
3760 
3761   // this operation was quite expensive, so decrease the limits
3762   decrease_limits();
3763 }
3764 
3765 void CMTask::drain_local_queue(bool partially) {
3766   if (has_aborted()) return;
3767 
3768   // Decide what the target size is, depending on whether we're going to
3769   // drain it partially (so that other tasks can steal if they run out
3770   // of things to do) or totally (at the very end).
3771   size_t target_size;
3772   if (partially) {
3773     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3774   } else {
3775     target_size = 0;
3776   }
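       // After a partial drain at most 'target_size' entries remain, i.e. at most
       // a third of the queue's capacity (further capped by GCDrainStackTargetSize),
       // so that other tasks can still find entries to steal.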
3777 
3778   if (_task_queue->size() > target_size) {
3779     if (_cm->verbose_high()) {
3780       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3781                              _worker_id, target_size);
3782     }
3783 
3784     oop obj;
3785     bool ret = _task_queue->pop_local(obj);
3786     while (ret) {
3787       statsOnly( ++_local_pops );
3788 
3789       if (_cm->verbose_high()) {
3790         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3791                                p2i((void*) obj));
3792       }
3793 
3794       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3795       assert(!_g1h->is_on_master_free_list(
3796                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3797 
3798       scan_object(obj);
3799 
3800       if (_task_queue->size() <= target_size || has_aborted()) {
3801         ret = false;
3802       } else {
3803         ret = _task_queue->pop_local(obj);
3804       }
3805     }
3806 
3807     if (_cm->verbose_high()) {
3808       gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3809                              _worker_id, _task_queue->size());
3810     }
3811   }
3812 }
3813 
3814 void CMTask::drain_global_stack(bool partially) {
3815   if (has_aborted()) return;
3816 
3817   // We have a policy to drain the local queue before we attempt to
3818   // drain the global stack.
3819   assert(partially || _task_queue->size() == 0, "invariant");
3820 
3821   // Decide what the target size is, depending on whether we're going to
3822   // drain it partially (so that other tasks can steal if they run out
3823   // of things to do) or totally (at the very end).  Notice that,
3824   // because we move entries from the global stack in chunks or
3825   // because another task might be doing the same, we might in fact
3826   // drop below the target. But, this is not a problem.
3827   size_t target_size;
3828   if (partially) {
3829     target_size = _cm->partial_mark_stack_size_target();
3830   } else {
3831     target_size = 0;
3832   }
3833 
3834   if (_cm->mark_stack_size() > target_size) {
3835     if (_cm->verbose_low()) {
3836       gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3837                              _worker_id, target_size);
3838     }
3839 
3840     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3841       get_entries_from_global_stack();
3842       drain_local_queue(partially);
3843     }
3844 
3845     if (_cm->verbose_low()) {
3846       gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3847                              _worker_id, _cm->mark_stack_size());
3848     }
3849   }
3850 }
3851 
3852 // The SATB queue has several assumptions on whether to call the par or
3853 // non-par versions of its methods. This is why some of the code is
3854 // replicated. We should really get rid of the single-threaded version
3855 // of the code to simplify things.
3856 void CMTask::drain_satb_buffers() {
3857   if (has_aborted()) return;
3858 
3859   // We set this so that the regular clock knows that we're in the
3860   // middle of draining buffers and doesn't set the abort flag when it
3861   // notices that SATB buffers are available for draining. It'd be
3862   // very counterproductive if it did that. :-)
3863   _draining_satb_buffers = true;
3864 
3865   CMObjectClosure oc(this);
3866   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3867   satb_mq_set.set_closure(_worker_id, &oc);
3868 
3869   // This keeps claiming and applying the closure to completed buffers
3870   // until we run out of buffers or we need to abort.
3871   while (!has_aborted() &&
3872          satb_mq_set.apply_closure_to_completed_buffer(_worker_id)) {
3873     if (_cm->verbose_medium()) {
3874       gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3875     }
3876     statsOnly( ++_satb_buffers_processed );
3877     regular_clock_call();
3878   }
3879 
3880   _draining_satb_buffers = false;
3881 
3882   assert(has_aborted() ||
3883          concurrent() ||
3884          satb_mq_set.completed_buffers_num() == 0, "invariant");
3885 
3886   satb_mq_set.set_closure(_worker_id, NULL);
3887 
3888   // Again, this was a potentially expensive operation; decrease the
3889   // limits to get the regular clock call early.
3890   decrease_limits();
3891 }
3892 
3893 void CMTask::print_stats() {
3894   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3895                          _worker_id, _calls);
3896   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3897                          _elapsed_time_ms, _termination_time_ms);
3898   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3899                          _step_times_ms.num(), _step_times_ms.avg(),
3900                          _step_times_ms.sd());
3901   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3902                          _step_times_ms.maximum(), _step_times_ms.sum());
3903 
3904 #if _MARKING_STATS_
3905   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3906                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3907                          _all_clock_intervals_ms.sd());
3908   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
3909                          _all_clock_intervals_ms.maximum(),
3910                          _all_clock_intervals_ms.sum());
3911   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = %d, marking = %d",
3912                          _clock_due_to_scanning, _clock_due_to_marking);
3913   gclog_or_tty->print_cr("  Objects: scanned = %d, found on the bitmap = %d",
3914                          _objs_scanned, _objs_found_on_bitmap);
3915   gclog_or_tty->print_cr("  Local Queue:  pushes = %d, pops = %d, max size = %d",
3916                          _local_pushes, _local_pops, _local_max_size);
3917   gclog_or_tty->print_cr("  Global Stack: pushes = %d, pops = %d, max size = %d",
3918                          _global_pushes, _global_pops, _global_max_size);
3919   gclog_or_tty->print_cr("                transfers to = %d, transfers from = %d",
3920                          _global_transfers_to, _global_transfers_from);
3921   gclog_or_tty->print_cr("  Regions: claimed = %d", _regions_claimed);
3922   gclog_or_tty->print_cr("  SATB buffers: processed = %d", _satb_buffers_processed);
3923   gclog_or_tty->print_cr("  Steals: attempts = %d, successes = %d",
3924                          _steal_attempts, _steals);
3925   gclog_or_tty->print_cr("  Aborted: %d, due to", _aborted);
3926   gclog_or_tty->print_cr("    overflow: %d, global abort: %d, yield: %d",
3927                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3928   gclog_or_tty->print_cr("    time out: %d, SATB: %d, termination: %d",
3929                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3930 #endif // _MARKING_STATS_
3931 }
3932 
3933 /*****************************************************************************
3934 
3935     The do_marking_step(time_target_ms, ...) method is the building
3936     block of the parallel marking framework. It can be called in parallel
3937     with other invocations of do_marking_step() on different tasks
3938     (but only one per task, obviously) and concurrently with the
3939     mutator threads, or during remark, hence it eliminates the need
3940     for two versions of the code. When called during remark, it will
3941     pick up from where the task left off during the concurrent marking
3942     phase. Interestingly, tasks are also claimable during evacuation
3943     pauses, since do_marking_step() ensures that it aborts before
3944     it needs to yield.
3945 
3946     The data structures that it uses to do marking work are the
3947     following:
3948 
3949       (1) Marking Bitmap. If there are gray objects that appear only
3950       on the bitmap (this happens either when dealing with an overflow
3951       or when the initial marking phase has simply marked the roots
3952       and didn't push them on the stack), then tasks claim heap
3953       regions whose bitmap they then scan to find gray objects. A
3954       global finger indicates where the end of the last claimed region
3955       is. A local finger indicates how far into the region a task has
3956       scanned. The two fingers are used to determine how to gray an
3957       object (i.e. whether simply marking it is OK, as it will be
3958       visited by a task in the future, or whether it needs to be also
3959       pushed on a stack).
3960 
3961       (2) Local Queue. The local queue of the task which is accessed
3962       reasonably efficiently by the task. Other tasks can steal from
3963       it when they run out of work. Throughout the marking phase, a
3964       task attempts to keep its local queue short but not totally
3965       empty, so that entries are available for stealing by other
3966       tasks. Only when there is no more work will a task totally
3967       drain its local queue.
3968 
3969       (3) Global Mark Stack. This handles local queue overflow. During
3970       marking only sets of entries are moved between it and the local
3971       queues, as access to it requires a mutex and more fine-grain
3972       interaction with it which might cause contention. If it
3973       overflows, then the marking phase should restart and iterate
3974       over the bitmap to identify gray objects. Throughout the marking
3975       phase, tasks attempt to keep the global mark stack at a small
3976       length but not totally empty, so that entries are available for
3977       popping by other tasks. Only when there is no more work will tasks
3978       totally drain the global mark stack.
3979 
3980       (4) SATB Buffer Queue. This is where completed SATB buffers are
3981       made available. Buffers are regularly removed from this queue
3982       and scanned for roots, so that the queue doesn't get too
3983       long. During remark, all completed buffers are processed, as
3984       well as the filled in parts of any uncompleted buffers.
3985 
3986     The do_marking_step() method tries to abort when the time target
3987     has been reached. There are a few other cases when the
3988     do_marking_step() method also aborts:
3989 
3990       (1) When the marking phase has been aborted (after a Full GC).
3991 
3992       (2) When a global overflow (on the global stack) has been
3993       triggered. Before the task aborts, it will actually sync up with
3994       the other tasks to ensure that all the marking data structures
3995       (local queues, stacks, fingers etc.)  are re-initialized so that
3996       when do_marking_step() completes, the marking phase can
3997       immediately restart.
3998 
3999       (3) When enough completed SATB buffers are available. The
4000       do_marking_step() method only tries to drain SATB buffers right
4001       at the beginning. So, if enough buffers are available, the
4002       marking step aborts and the SATB buffers are processed at
4003       the beginning of the next invocation.
4004 
4005       (4) To yield. When we have to yield, we abort and yield
4006       right at the end of do_marking_step(). This saves us from a lot
4007       of hassle as, by yielding we might allow a Full GC. If this
4008       happens then objects will be compacted underneath our feet, the
4009       heap might shrink, etc. We save checking for this by just
4010       aborting and doing the yield right at the end.
4011 
4012     From the above it follows that the do_marking_step() method should
4013     be called in a loop (or, otherwise, regularly) until it completes.
4014 
4015     If a marking step completes without its has_aborted() flag being
4016     true, it means it has completed the current marking phase (and
4017     also all other marking tasks have done so and have all synced up).
4018 
4019     A method called regular_clock_call() is invoked "regularly" (in
4020     sub-ms intervals) throughout marking. It is this clock method that
4021     checks all the abort conditions which were mentioned above and
4022     decides when the task should abort. A work-based scheme is used to
4023     trigger this clock method: when the number of object words the
4024     marking phase has scanned or the number of references the marking
4025     phase has visited reach a given limit. Additional invocations of
4026     the clock method have been planted in a few other strategic places
4027     too. The initial reason for the clock method was to avoid calling
4028     vtime too regularly, as it is quite expensive. So, once it was in
4029     place, it was natural to piggy-back all the other conditions on it
4030     too and not constantly check them throughout the code.
4031 
4032     If do_termination is true then do_marking_step will enter its
4033     termination protocol.
4034 
4035     The value of is_serial must be true when do_marking_step is being
4036     called serially (i.e. by the VMThread) and do_marking_step should
4037     skip any synchronization in the termination and overflow code.
4038     Examples include the serial remark code and the serial reference
4039     processing closures.
4040 
4041     The value of is_serial must be false when do_marking_step is
4042     being called by any of the worker threads in a work gang.
4043     Examples include the concurrent marking code (CMMarkingTask),
4044     the MT remark code, and the MT reference processing closures.
4045 
4046  *****************************************************************************/
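     // A hedged sketch (not a verbatim copy of any caller) of the calling pattern
     // the comment above implies: do_marking_step() is simply retried until it
     // completes without aborting, roughly:
     //
     //   do {
     //     task->do_marking_step(target_ms,
     //                           true  /* do_termination */,
     //                           false /* is_serial */);
     //     // ...yield and/or handle overflow between attempts as needed...
     //   } while (task->has_aborted() && !cm->has_aborted());
     //
     // ('task' and 'cm' here stand for a CMTask* and its ConcurrentMark* owner.)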
4047 
4048 void CMTask::do_marking_step(double time_target_ms,
4049                              bool do_termination,
4050                              bool is_serial) {
4051   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4052   assert(concurrent() == _cm->concurrent(), "they should be the same");
4053 
4054   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4055   assert(_task_queues != NULL, "invariant");
4056   assert(_task_queue != NULL, "invariant");
4057   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4058 
4059   assert(!_claimed,
4060          "only one thread should claim this task at any one time");
4061 
4062   // OK, this doesn't safeguard against all possible scenarios, as it is
4063   // possible for two threads to set the _claimed flag at the same
4064   // time. But it is only for debugging purposes anyway and it will
4065   // catch most problems.
4066   _claimed = true;
4067 
4068   _start_time_ms = os::elapsedVTime() * 1000.0;
4069   statsOnly( _interval_start_time_ms = _start_time_ms );
4070 
4071   // If do_stealing is true then do_marking_step will attempt to
4072   // steal work from the other CMTasks. It only makes sense to
4073   // enable stealing when the termination protocol is enabled
4074   // and do_marking_step() is not being called serially.
4075   bool do_stealing = do_termination && !is_serial;
4076 
4077   double diff_prediction_ms =
4078     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4079   _time_target_ms = time_target_ms - diff_prediction_ms;
4080 
4081   // set up the variables that are used in the work-based scheme to
4082   // call the regular clock method
4083   _words_scanned = 0;
4084   _refs_reached  = 0;
4085   recalculate_limits();
4086 
4087   // clear all flags
4088   clear_has_aborted();
4089   _has_timed_out = false;
4090   _draining_satb_buffers = false;
4091 
4092   ++_calls;
4093 
4094   if (_cm->verbose_low()) {
4095     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4096                            "target = %1.2lfms >>>>>>>>>>",
4097                            _worker_id, _calls, _time_target_ms);
4098   }
4099 
4100   // Set up the bitmap and oop closures. Anything that uses them is
4101   // eventually called from this method, so it is OK to allocate these
4102   // statically.
4103   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4104   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
4105   set_cm_oop_closure(&cm_oop_closure);
4106 
4107   if (_cm->has_overflown()) {
4108     // This can happen if the mark stack overflows during a GC pause
4109     // and this task, after a yield point, restarts. We have to abort
4110     // as we need to get into the overflow protocol which happens
4111     // right at the end of this task.
4112     set_has_aborted();
4113   }
4114 
4115   // First drain any available SATB buffers. After this, we will not
4116   // look at SATB buffers before the next invocation of this method.
4117   // If enough completed SATB buffers are queued up, the regular clock
4118   // will abort this task so that it restarts.
4119   drain_satb_buffers();
4120   // ...then partially drain the local queue and the global stack
4121   drain_local_queue(true);
4122   drain_global_stack(true);
4123 
4124   do {
4125     if (!has_aborted() && _curr_region != NULL) {
4126       // This means that we're already holding on to a region.
4127       assert(_finger != NULL, "if region is not NULL, then the finger "
4128              "should not be NULL either");
4129 
4130       // We might have restarted this task after an evacuation pause
4131       // which might have evacuated the region we're holding on to
4132       // underneath our feet. Let's read its limit again to make sure
4133       // that we do not iterate over a region of the heap that
4134       // contains garbage (update_region_limit() will also move
4135       // _finger to the start of the region if it is found empty).
4136       update_region_limit();
4137       // We will start from _finger not from the start of the region,
4138       // as we might be restarting this task after aborting half-way
4139       // through scanning this region. In this case, _finger points to
4140       // the address where we last found a marked object. If this is a
4141       // fresh region, _finger points to start().
4142       MemRegion mr = MemRegion(_finger, _region_limit);
4143 
4144       if (_cm->verbose_low()) {
4145         gclog_or_tty->print_cr("[%u] we're scanning part "
4146                                "["PTR_FORMAT", "PTR_FORMAT") "
4147                                "of region "HR_FORMAT,
4148                                _worker_id, p2i(_finger), p2i(_region_limit),
4149                                HR_FORMAT_PARAMS(_curr_region));
4150       }
4151 
4152       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
4153              "humongous regions should go around loop once only");
4154 
4155       // Some special cases:
4156       // If the memory region is empty, we can just give up the region.
4157       // If the current region is humongous then we only need to check
4158       // the bitmap for the bit associated with the start of the object,
4159       // scan the object if it's live, and give up the region.
4160       // Otherwise, let's iterate over the bitmap of the part of the region
4161       // that is left.
4162       // If the iteration is successful, give up the region.
4163       if (mr.is_empty()) {
4164         giveup_current_region();
4165         regular_clock_call();
4166       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
4167         if (_nextMarkBitMap->isMarked(mr.start())) {
4168           // The object is marked - apply the closure
4169           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4170           bitmap_closure.do_bit(offset);
4171         }
4172         // Even if this task aborted while scanning the humongous object
4173         // we can (and should) give up the current region.
4174         giveup_current_region();
4175         regular_clock_call();
4176       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4177         giveup_current_region();
4178         regular_clock_call();
4179       } else {
4180         assert(has_aborted(), "currently the only way to do so");
4181         // The only way to abort the bitmap iteration is to return
4182         // false from the do_bit() method. However, inside the
4183         // do_bit() method we move the _finger to point to the
4184         // object currently being looked at. So, if we bail out, we
4185         // have definitely set _finger to something non-null.
4186         assert(_finger != NULL, "invariant");
4187 
4188         // Region iteration was actually aborted. So now _finger
4189         // points to the address of the object we last scanned. If we
4190         // leave it there, when we restart this task, we will rescan
4191         // the object. It is easy to avoid this. We move the finger by
4192         // enough to point to the next possible object header (the
4193         // bitmap knows by how much we need to move it as it knows its
4194         // granularity).
4195         assert(_finger < _region_limit, "invariant");
4196         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
4197         // Check if bitmap iteration was aborted while scanning the last object
4198         if (new_finger >= _region_limit) {
4199           giveup_current_region();
4200         } else {
4201           move_finger_to(new_finger);
4202         }
4203       }
4204     }
4205     // At this point we have either completed iterating over the
4206     // region we were holding on to, or we have aborted.
4207 
4208     // We then partially drain the local queue and the global stack.
4209     // (Do we really need this?)
4210     drain_local_queue(true);
4211     drain_global_stack(true);
4212 
4213     // Read the note on the claim_region() method on why it might
4214     // return NULL with potentially more regions available for
4215     // claiming and why we have to check out_of_regions() to determine
4216     // whether we're done or not.
4217     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4218       // We are going to try to claim a new region. We should have
4219       // given up on the previous one.
4220       // Separated the asserts so that we know which one fires.
4221       assert(_curr_region  == NULL, "invariant");
4222       assert(_finger       == NULL, "invariant");
4223       assert(_region_limit == NULL, "invariant");
4224       if (_cm->verbose_low()) {
4225         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4226       }
4227       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4228       if (claimed_region != NULL) {
4229         // Yes, we managed to claim one
4230         statsOnly( ++_regions_claimed );
4231 
4232         if (_cm->verbose_low()) {
4233           gclog_or_tty->print_cr("[%u] we successfully claimed "
4234                                  "region "PTR_FORMAT,
4235                                  _worker_id, p2i(claimed_region));
4236         }
4237 
4238         setup_for_region(claimed_region);
4239         assert(_curr_region == claimed_region, "invariant");
4240       }
4241       // It is important to call the regular clock here. It might take
4242       // a while to claim a region if, for example, we hit a large
4243       // block of empty regions. So we need to call the regular clock
4244       // method once per loop iteration to make sure it's called
4245       // frequently enough.
4246       regular_clock_call();
4247     }
4248 
4249     if (!has_aborted() && _curr_region == NULL) {
4250       assert(_cm->out_of_regions(),
4251              "at this point we should be out of regions");
4252     }
4253   } while ( _curr_region != NULL && !has_aborted());
4254 
4255   if (!has_aborted()) {
4256     // We cannot check whether the global stack is empty, since other
4257     // tasks might be pushing objects to it concurrently.
4258     assert(_cm->out_of_regions(),
4259            "at this point we should be out of regions");
4260 
4261     if (_cm->verbose_low()) {
4262       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4263     }
4264 
4265     // Try to reduce the number of available SATB buffers so that
4266     // remark has less work to do.
4267     drain_satb_buffers();
4268   }
4269 
4270   // Since we've done everything else, we can now totally drain the
4271   // local queue and global stack.
4272   drain_local_queue(false);
4273   drain_global_stack(false);
4274 
4275   // Attempt to steal work from other tasks' queues.
4276   if (do_stealing && !has_aborted()) {
4277     // We have not aborted. This means that we have finished all that
4278     // we could. Let's try to do some stealing...
4279 
4280     // We cannot check whether the global stack is empty, since other
4281     // tasks might be pushing objects to it concurrently.
4282     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4283            "only way to reach here");
4284 
4285     if (_cm->verbose_low()) {
4286       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4287     }
4288 
4289     while (!has_aborted()) {
4290       oop obj;
4291       statsOnly( ++_steal_attempts );
4292 
4293       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4294         if (_cm->verbose_medium()) {
4295           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4296                                  _worker_id, p2i((void*) obj));
4297         }
4298 
4299         statsOnly( ++_steals );
4300 
4301         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4302                "any stolen object should be marked");
4303         scan_object(obj);
4304 
4305         // And since we're towards the end, let's totally drain the
4306         // local queue and global stack.
4307         drain_local_queue(false);
4308         drain_global_stack(false);
4309       } else {
4310         break;
4311       }
4312     }
4313   }
4314 
4315   // If we are about to wrap up and go into termination, check if we
4316   // should raise the overflow flag.
4317   if (do_termination && !has_aborted()) {
4318     if (_cm->force_overflow()->should_force()) {
4319       _cm->set_has_overflown();
4320       regular_clock_call();
4321     }
4322   }
4323 
4324   // We still haven't aborted. Now, let's try to get into the
4325   // termination protocol.
4326   if (do_termination && !has_aborted()) {
4327     // We cannot check whether the global stack is empty, since other
4328     // tasks might be concurrently pushing objects on it.
4329     // Separated the asserts so that we know which one fires.
4330     assert(_cm->out_of_regions(), "only way to reach here");
4331     assert(_task_queue->size() == 0, "only way to reach here");
4332 
4333     if (_cm->verbose_low()) {
4334       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4335     }
4336 
4337     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4338 
4339     // The CMTask class also extends the TerminatorTerminator class,
4340     // so its should_exit_termination() method is consulted when
4341     // deciding whether to exit the termination protocol.
4342     bool finished = (is_serial ||
4343                      _cm->terminator()->offer_termination(this));
4344     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4345     _termination_time_ms +=
4346       termination_end_time_ms - _termination_start_time_ms;
4347 
4348     if (finished) {
4349       // We're all done.
4350 
4351       if (_worker_id == 0) {
4352         // let's allow task 0 to do this
4353         if (concurrent()) {
4354           assert(_cm->concurrent_marking_in_progress(), "invariant");
4355           // we need to set this to false before the next
4356           // safepoint. This way we ensure that the marking phase
4357           // doesn't observe any more heap expansions.
4358           _cm->clear_concurrent_marking_in_progress();
4359         }
4360       }
4361 
4362       // We can now guarantee that the global stack is empty, since
4363       // all other tasks have finished. We separated the guarantees so
4364       // that, if a condition is false, we can immediately find out
4365       // which one.
4366       guarantee(_cm->out_of_regions(), "only way to reach here");
4367       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4368       guarantee(_task_queue->size() == 0, "only way to reach here");
4369       guarantee(!_cm->has_overflown(), "only way to reach here");
4370       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4371 
4372       if (_cm->verbose_low()) {
4373         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4374       }
4375     } else {
4376       // Apparently there's more work to do. Let's abort this task;
4377       // it will be restarted and we can hopefully find more things to do.
4378 
4379       if (_cm->verbose_low()) {
4380         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4381                                _worker_id);
4382       }
4383 
4384       set_has_aborted();
4385       statsOnly( ++_aborted_termination );
4386     }
4387   }
4388 
4389   // Mainly for debugging purposes to make sure that a pointer to the
4390   // closure which was allocated on this frame's stack doesn't
4391   // escape it by accident.
4392   set_cm_oop_closure(NULL);
4393   double end_time_ms = os::elapsedVTime() * 1000.0;
4394   double elapsed_time_ms = end_time_ms - _start_time_ms;
4395   // Update the step history.
4396   _step_times_ms.add(elapsed_time_ms);
4397 
4398   if (has_aborted()) {
4399     // The task was aborted for some reason.
4400 
4401     statsOnly( ++_aborted );
4402 
4403     if (_has_timed_out) {
4404       double diff_ms = elapsed_time_ms - _time_target_ms;
4405       // Keep statistics of how well we did with respect to hitting
4406       // our target only if we actually timed out (if we aborted for
4407       // other reasons, then the results might get skewed).
4408       _marking_step_diffs_ms.add(diff_ms);
4409     }
4410 
4411     if (_cm->has_overflown()) {
4412       // This is the interesting one. We aborted because a global
4413       // overflow was raised. This means we have to restart the
4414       // marking phase and start iterating over regions. However, in
4415       // order to do this we have to make sure that all tasks stop
4416       // what they are doing and re-initialize in a safe manner. We
4417       // will achieve this with the use of two barrier sync points.
4418 
4419       if (_cm->verbose_low()) {
4420         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4421       }
4422 
4423       if (!is_serial) {
4424         // We only need to enter the sync barrier if we are being
4425         // called from a parallel context.
4426         _cm->enter_first_sync_barrier(_worker_id);
4427 
4428         // When we exit this sync barrier we know that all tasks have
4429         // stopped doing marking work. So, it's now safe to
4430         // re-initialize our data structures. At the end of this method,
4431         // task 0 will clear the global data structures.
4432       }
4433 
4434       statsOnly( ++_aborted_overflow );
4435 
4436       // We clear the local state of this task...
4437       clear_region_fields();
4438 
4439       if (!is_serial) {
4440         // ...and enter the second barrier.
4441         _cm->enter_second_sync_barrier(_worker_id);
4442       }
4443       // At this point, if we're in the concurrent phase of
4444       // marking, everything has been re-initialized and we're
4445       // ready to restart.
4446     }
4447 
4448     if (_cm->verbose_low()) {
4449       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4450                              "elapsed = %1.2lfms <<<<<<<<<<",
4451                              _worker_id, _time_target_ms, elapsed_time_ms);
4452       if (_cm->has_aborted()) {
4453         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4454                                _worker_id);
4455       }
4456     }
4457   } else {
4458     if (_cm->verbose_low()) {
4459       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4460                              "elapsed = %1.2lfms <<<<<<<<<<",
4461                              _worker_id, _time_target_ms, elapsed_time_ms);
4462     }
4463   }
4464 
4465   _claimed = false;
4466 }
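
     // A minimal sketch of how a marking worker is expected to drive
     // do_marking_step() (the loop and names below are illustrative, not
     // the actual caller):
     //
     //   CMTask* task = concurrent_mark->task(worker_id);  // assumed accessor
     //   do {
     //     task->do_marking_step(10.0  /* time_target_ms, illustrative */,
     //                           true  /* do_termination */,
     //                           false /* is_serial */);
     //     // An aborted step (timeout, overflow, yield request) is simply
     //     // retried until marking finishes or the whole cycle is aborted.
     //   } while (task->has_aborted() && !concurrent_mark->has_aborted());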
4467 
4468 CMTask::CMTask(uint worker_id,
4469                ConcurrentMark* cm,
4470                size_t* marked_bytes,
4471                BitMap* card_bm,
4472                CMTaskQueue* task_queue,
4473                CMTaskQueueSet* task_queues)
4474   : _g1h(G1CollectedHeap::heap()),
4475     _worker_id(worker_id), _cm(cm),
4476     _claimed(false),
4477     _nextMarkBitMap(NULL), _hash_seed(17),
4478     _task_queue(task_queue),
4479     _task_queues(task_queues),
4480     _cm_oop_closure(NULL),
4481     _marked_bytes_array(marked_bytes),
4482     _card_bm(card_bm) {
4483   guarantee(task_queue != NULL, "invariant");
4484   guarantee(task_queues != NULL, "invariant");
4485 
4486   statsOnly( _clock_due_to_scanning = 0;
4487              _clock_due_to_marking  = 0 );
4488 
4489   _marking_step_diffs_ms.add(0.5);
4490 }
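
     // For illustration only: ConcurrentMark is expected to create one CMTask
     // per worker and wire it to its own task queue plus the shared queue set,
     // along these lines (the field names are assumptions, not the actual code):
     //
     //   CMTaskQueue* queue = new CMTaskQueue();
     //   _task_queues->register_queue(worker_id, queue);
     //   _tasks[worker_id] = new CMTask(worker_id, this,
     //                                  _count_marked_bytes[worker_id],
     //                                  &_count_card_bitmaps[worker_id],
     //                                  queue, _task_queues);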
4491 
4492 // These macros are used below to ensure consistent output
4493 // formatting. The *_H_* versions format the column header for a
4494 // particular value and should be kept in sync with the corresponding
4495 // value macro. Also note that most of the macros add the necessary
4496 // white space (as a prefix), which makes them a bit easier to
4497 // compose.
4498 
4499 // All the output lines are prefixed with this string to be able to
4500 // identify them easily in a large log file.
4501 #define G1PPRL_LINE_PREFIX            "###"
4502 
4503 #define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
4504 #ifdef _LP64
4505 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
4506 #else // _LP64
4507 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
4508 #endif // _LP64
4509 
4510 // For per-region info
4511 #define G1PPRL_TYPE_FORMAT            "   %-4s"
4512 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
4513 #define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
4514 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
4515 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
4516 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
4517 
4518 // For summary info
4519 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
4520 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
4521 #define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
4522 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
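
     // These rely on adjacent string-literal concatenation, so, for example,
     // G1PPRL_SUM_MB_PERC_FORMAT("used") expands to the single format string
     //   "  used: %1.2f MB / %1.2f %%"
     // which consumes two double arguments (the size in MB and the percentage).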
4523 
4524 G1PrintRegionLivenessInfoClosure::
4525 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4526   : _out(out),
4527     _total_used_bytes(0), _total_capacity_bytes(0),
4528     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4529     _hum_used_bytes(0), _hum_capacity_bytes(0),
4530     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4531     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4532   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4533   MemRegion g1_reserved = g1h->g1_reserved();
4534   double now = os::elapsedTime();
4535 
4536   // Print the header of the output.
4537   _out->cr();
4538   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4539   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4540                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4541                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4542                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4543                  HeapRegion::GrainBytes);
4544   _out->print_cr(G1PPRL_LINE_PREFIX);
4545   _out->print_cr(G1PPRL_LINE_PREFIX
4546                 G1PPRL_TYPE_H_FORMAT
4547                 G1PPRL_ADDR_BASE_H_FORMAT
4548                 G1PPRL_BYTE_H_FORMAT
4549                 G1PPRL_BYTE_H_FORMAT
4550                 G1PPRL_BYTE_H_FORMAT
4551                 G1PPRL_DOUBLE_H_FORMAT
4552                 G1PPRL_BYTE_H_FORMAT
4553                 G1PPRL_BYTE_H_FORMAT,
4554                 "type", "address-range",
4555                 "used", "prev-live", "next-live", "gc-eff",
4556                 "remset", "code-roots");
4557   _out->print_cr(G1PPRL_LINE_PREFIX
4558                 G1PPRL_TYPE_H_FORMAT
4559                 G1PPRL_ADDR_BASE_H_FORMAT
4560                 G1PPRL_BYTE_H_FORMAT
4561                 G1PPRL_BYTE_H_FORMAT
4562                 G1PPRL_BYTE_H_FORMAT
4563                 G1PPRL_DOUBLE_H_FORMAT
4564                 G1PPRL_BYTE_H_FORMAT
4565                 G1PPRL_BYTE_H_FORMAT,
4566                 "", "",
4567                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4568                 "(bytes)", "(bytes)");
4569 }
4570 
4571 // Given a pointer to one of the _hum_* fields, this deduces the
4572 // corresponding value for a region in a humongous region series
4573 // (either the region size, or what's left if the _hum_* field is less
4574 // than the region size), and updates the _hum_* field accordingly.
4575 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4576   size_t bytes = 0;
4577   // The > 0 check is to deal with the prev and next live bytes which
4578   // could be 0.
4579   if (*hum_bytes > 0) {
4580     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4581     *hum_bytes -= bytes;
4582   }
4583   return bytes;
4584 }
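
     // For example (region size illustrative): with HeapRegion::GrainBytes at
     // 1M and a humongous series whose remaining used bytes are 2.5M, three
     // successive calls return 1M, 1M and 0.5M (one value for the "starts
     // humongous" region and one for each of its two continuations), leaving
     // the field at 0.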
4585 
4586 // It deduces the values for a region in a humongous region series
4587 // from the _hum_* fields and updates those accordingly. It assumes
4588 // that the _hum_* fields have already been set up from the "starts
4589 // humongous" region and that we visit the regions in address order.
4590 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4591                                                      size_t* capacity_bytes,
4592                                                      size_t* prev_live_bytes,
4593                                                      size_t* next_live_bytes) {
4594   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4595   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
4596   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
4597   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4598   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4599 }
4600 
4601 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4602   const char* type       = r->get_type_str();
4603   HeapWord* bottom       = r->bottom();
4604   HeapWord* end          = r->end();
4605   size_t capacity_bytes  = r->capacity();
4606   size_t used_bytes      = r->used();
4607   size_t prev_live_bytes = r->live_bytes();
4608   size_t next_live_bytes = r->next_live_bytes();
4609   double gc_eff          = r->gc_efficiency();
4610   size_t remset_bytes    = r->rem_set()->mem_size();
4611   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4612 
4613   if (r->is_starts_humongous()) {
4614     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4615            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4616            "they should have been zeroed after the last time we used them");
4617     // Set up the _hum_* fields.
4618     _hum_capacity_bytes  = capacity_bytes;
4619     _hum_used_bytes      = used_bytes;
4620     _hum_prev_live_bytes = prev_live_bytes;
4621     _hum_next_live_bytes = next_live_bytes;
4622     get_hum_bytes(&used_bytes, &capacity_bytes,
4623                   &prev_live_bytes, &next_live_bytes);
4624     end = bottom + HeapRegion::GrainWords;
4625   } else if (r->is_continues_humongous()) {
4626     get_hum_bytes(&used_bytes, &capacity_bytes,
4627                   &prev_live_bytes, &next_live_bytes);
4628     assert(end == bottom + HeapRegion::GrainWords, "invariant");
4629   }
4630 
4631   _total_used_bytes      += used_bytes;
4632   _total_capacity_bytes  += capacity_bytes;
4633   _total_prev_live_bytes += prev_live_bytes;
4634   _total_next_live_bytes += next_live_bytes;
4635   _total_remset_bytes    += remset_bytes;
4636   _total_strong_code_roots_bytes += strong_code_roots_bytes;
4637 
4638   // Print a line for this particular region.
4639   _out->print_cr(G1PPRL_LINE_PREFIX
4640                  G1PPRL_TYPE_FORMAT
4641                  G1PPRL_ADDR_BASE_FORMAT
4642                  G1PPRL_BYTE_FORMAT
4643                  G1PPRL_BYTE_FORMAT
4644                  G1PPRL_BYTE_FORMAT
4645                  G1PPRL_DOUBLE_FORMAT
4646                  G1PPRL_BYTE_FORMAT
4647                  G1PPRL_BYTE_FORMAT,
4648                  type, p2i(bottom), p2i(end),
4649                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4650                  remset_bytes, strong_code_roots_bytes);
4651 
4652   return false;
4653 }
4654 
4655 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4656   // Add the static memory usage to the remembered set sizes.
4657   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4658   // Print the footer of the output.
4659   _out->print_cr(G1PPRL_LINE_PREFIX);
4660   _out->print_cr(G1PPRL_LINE_PREFIX
4661                  " SUMMARY"
4662                  G1PPRL_SUM_MB_FORMAT("capacity")
4663                  G1PPRL_SUM_MB_PERC_FORMAT("used")
4664                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4665                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4666                  G1PPRL_SUM_MB_FORMAT("remset")
4667                  G1PPRL_SUM_MB_FORMAT("code-roots"),
4668                  bytes_to_mb(_total_capacity_bytes),
4669                  bytes_to_mb(_total_used_bytes),
4670                  perc(_total_used_bytes, _total_capacity_bytes),
4671                  bytes_to_mb(_total_prev_live_bytes),
4672                  perc(_total_prev_live_bytes, _total_capacity_bytes),
4673                  bytes_to_mb(_total_next_live_bytes),
4674                  perc(_total_next_live_bytes, _total_capacity_bytes),
4675                  bytes_to_mb(_total_remset_bytes),
4676                  bytes_to_mb(_total_strong_code_roots_bytes));
4677   _out->cr();
4678 }
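
     // A minimal usage sketch (the phase name is illustrative): the table is
     // produced by constructing the closure and iterating it over every region,
     // e.g.
     //
     //   G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "post-marking");
     //   G1CollectedHeap::heap()->heap_region_iterate(&cl);
     //
     // The constructor prints the header, doHeapRegion() prints one line per
     // region, and the destructor prints the summary footer.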