/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

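// Read-only view of the marking bit map: each bit corresponds to
// (1 << _shifter) heap words, and heapWordToOffset() / offsetToHeapWord()
// translate between heap addresses and bit offsets.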
CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
                                               HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                 HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

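// The writable bit map. The backing store is sized at one bit per
// (1 << _shifter) heap words, i.e. (_bmWordSize >> (_shifter +
// LogBitsPerByte)) bytes rounded up to the allocation alignment, and
// is committed in full up front.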
bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((uintptr_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}

void CMBitMap::clearAll() {
  _bm.clear();
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

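// CMMarkStack is a fixed-capacity stack of grey oops. Pushes from the
// marking threads are lock-free (a CAS on _index); the bulk push/pop
// operations below serialize on ParGCRareEvent_lock instead.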
CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we overflowed the marking stack while marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up the existing stack until we have successfully
  // reserved backing store for the doubled capacity.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with the old stack
    _virtual_space.release();
    // Reinitialize the virtual space for the new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double the capacity; carry on with the existing stack.
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a marking stack
  // overflow, record that we should, if possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

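// Lock-free push: claim a slot by advancing _index with a CAS, then
// store the oop into the claimed slot. If the stack is full we only
// set _overflow and leave the recovery to the caller.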
void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically.  We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int  ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

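// Bulk push under ParGCRareEvent_lock. As the lock's name suggests,
// this path is presumably taken rarely, so serializing here should be
// cheap.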
void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint  new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

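// Drain the stack, applying cl to every grey object popped off it.
// If yield_after is true we offer to yield after each object and
// return false if we actually yielded before emptying the stack.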
template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

bool ConcurrentMark::not_yet_marked(oop obj) const {
  return _g1h->is_obj_ill(obj);
}

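// CMRootRegions hands out the root regions, one at a time, to the
// threads running CMRootRegionScanTask. Currently only survivor
// regions can be root regions.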
CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false),  _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

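// Use roughly one concurrent marking thread per four parallel GC
// threads, but always at least one.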
uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  _g1h(g1h),
  _markBitMap1(MinObjAlignment - 1),
  _markBitMap2(MinObjAlignment - 1),

  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = "PTR_FORMAT, _heap_start, _heap_end);
  }

  if (!_markBitMap1.allocate(heap_rs)) {
    warning("Failed to allocate first CM bit map");
    return;
  }
  if (!_markBitMap2.allocate(heap_rs)) {
    warning("Failed to allocate second CM bit map");
    return;
  }

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
            "than ParallelGCThreads (" UINT32_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads =       0;
    _max_parallel_marking_threads =   0;
    _sleep_factor             =     0.0;
    _marking_task_overhead    =     1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
                                                (double) os::processor_count();
      double sleep_factor =
                         (1.0 - marking_task_overhead) / marking_task_overhead;
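      // Illustrative (hypothetical) numbers: with MaxGCPauseMillis = 200,
      // GCPauseIntervalMillis = 1000, G1MarkingOverheadPercent = 50 and
      // 8 processors, overall_cm_overhead = 200 * 0.5 / 1000 = 0.1 and
      // cpu_ratio = 0.125, giving one marking thread with a task
      // overhead of 0.1 * 8 = 0.8 and a sleep factor of
      // (1.0 - 0.8) / 0.8 = 0.25, i.e. the thread sleeps for a quarter
      // of the vtime it just spent marking (see CMConcurrentMarkingTask).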

      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor             = sleep_factor;
      _marking_task_overhead    = marking_task_overhead;
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
                     (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) heap_rs.base();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.

    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions
  // at the end of evacuation pauses, when tasks are inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end   = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different numbers of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // We need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(_finger == _heap_end,
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   _finger, _heap_end));
    update_g1_committed(true);
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread still appears to be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // Clear the mark bitmap (no grey objects to start with).
  // We need to do this in chunks and offer to yield in between
  // each chunk.
  HeapWord* start  = _nextMarkBitMap->startWord();
  HeapWord* end    = _nextMarkBitMap->endWord();
  HeapWord* cur    = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur,next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Clear the liveness counting data
  clear_all_count_data();

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialise marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _first_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->date_stamp(PrintGCDateStamps);
        gclog_or_tty->stamp(PrintGCTimeStamps);
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _second_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
  }
}

#ifndef PRODUCT
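// ForceOverflowSettings is a (non-product) testing aid: while
// _num_remaining, initialized from G1ConcMarkForceOverflow, is still
// positive, should_force() reports true once per update() so that the
// overflow-and-restart machinery gets exercised.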
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    ConcurrentGCThread::stsJoin();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          ConcurrentGCThread::stsLeave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          ConcurrentGCThread::stsJoin();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
          gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                                 "overhead %1.4lf",
                                 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                                 the_task->conc_overhead(os::elapsedTime()) * 8.0);
          gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                                 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    ConcurrentGCThread::stsLeave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

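// Scan a single root region: iterate over every object in [bottom, top)
// and apply the root region scan closure to each of its reference fields.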
void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_strong_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops.  We overflowed.  Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->continuesHumongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
    if (!hr->startsHumongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
1393   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1394                          BitMap* region_bm, BitMap* card_bm) :
1395     CMCountDataClosureBase(g1h, region_bm, card_bm),
1396     _bm(bm), _region_marked_bytes(0) { }
1397 
1398   bool doHeapRegion(HeapRegion* hr) {
1399 
1400     if (hr->continuesHumongous()) {
1401       // We will ignore these here and process them when their
1402       // associated "starts humongous" region is processed (see
1403       // set_bit_for_heap_region()). Note that we cannot rely on their
1404       // associated "starts humongous" region to have their bit set to
1405       // 1 since, due to the region chunking in the parallel region
1406       // iteration, a "continues humongous" region might be visited
1407       // before its associated "starts humongous".
1408       return false;
1409     }
1410 
1411     HeapWord* ntams = hr->next_top_at_mark_start();
1412     HeapWord* start = hr->bottom();
1413 
1414     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1415            err_msg("Preconditions not met - "
1416                    "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1417                    start, ntams, hr->end()));
1418 
1419     // Find the first marked object at or after "start".
1420     start = _bm->getNextMarkedWordAddress(start, ntams);
1421 
1422     size_t marked_bytes = 0;
1423 
1424     while (start < ntams) {
1425       oop obj = oop(start);
1426       int obj_sz = obj->size();
1427       HeapWord* obj_end = start + obj_sz;
1428 
1429       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1430       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1431 
1432       // Note: if we're looking at the last region in the heap, obj_end
1433       // could actually be just beyond the end of the heap; end_idx
1434       // will then correspond to a (non-existent) card that is also
1435       // just beyond the heap.
1436       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1437         // end of object is not card aligned - increment to cover
1438         // all the cards spanned by the object
1439         end_idx += 1;
1440       }
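      // Worked example (illustrative only, assuming the default
      // 512-byte cards and, for simplicity, a heap that starts at
      // address 0): an object occupying [0x8000, 0x8220) gives
      // start_idx == 0x40 and end_idx == 0x41; since 0x8220 is not
      // card aligned, end_idx is bumped to 0x42 so that the half-open
      // range [0x40, 0x42) covers both cards the object touches.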
1441 
1442       // Set the bits in the card BM for the cards spanned by this object.
1443       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1444 
1445       // Add the size of this object to the number of marked bytes.
1446       marked_bytes += (size_t)obj_sz * HeapWordSize;
1447 
1448       // Find the next marked object after this one.
1449       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1450     }
1451 
1452     // Mark the allocated-since-marking portion...
1453     HeapWord* top = hr->top();
1454     if (ntams < top) {
1455       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1456       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1457 
1458       // Note: if we're looking at the last region in the heap, top
1459       // could actually be just beyond the end of the heap; end_idx
1460       // will then correspond to a (non-existent) card that is also
1461       // just beyond the heap.
1462       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1463         // top is not card aligned - increment end_idx to cover
1464         // all the cards spanned by the range [ntams, top)
1465         end_idx += 1;
1466       }
1467       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1468 
1469       // This definitely means the region has live objects.
1470       set_bit_for_region(hr);
1471     }
1472 
1473     // Update the live region bitmap.
1474     if (marked_bytes > 0) {
1475       set_bit_for_region(hr);
1476     }
1477 
1478     // Set the marked bytes for the current region so that
1479     // it can be queried by a calling verification routine.
1480     _region_marked_bytes = marked_bytes;
1481 
1482     return false;
1483   }
1484 
1485   size_t region_marked_bytes() const { return _region_marked_bytes; }
1486 };
1487 
1488 // Heap region closure used for verifying the counting data
1489 // that was accumulated concurrently and aggregated during
1490 // the remark pause. This closure is applied to the heap
1491 // regions during the STW cleanup pause.
1492 
1493 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1494   G1CollectedHeap* _g1h;
1495   ConcurrentMark* _cm;
1496   CalcLiveObjectsClosure _calc_cl;
1497   BitMap* _region_bm;   // Region BM to be verified
1498   BitMap* _card_bm;     // Card BM to be verified
1499   bool _verbose;        // verbose output?
1500 
1501   BitMap* _exp_region_bm; // Expected Region BM values
1502   BitMap* _exp_card_bm;   // Expected card BM values
1503 
1504   int _failures;
1505 
1506 public:
1507   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1508                                 BitMap* region_bm,
1509                                 BitMap* card_bm,
1510                                 BitMap* exp_region_bm,
1511                                 BitMap* exp_card_bm,
1512                                 bool verbose) :
1513     _g1h(g1h), _cm(g1h->concurrent_mark()),
1514     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1515     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1516     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1517     _failures(0) { }
1518 
1519   int failures() const { return _failures; }
1520 
1521   bool doHeapRegion(HeapRegion* hr) {
1522     if (hr->continuesHumongous()) {
1523       // We will ignore these here and process them when their
1524       // associated "starts humongous" region is processed (see
1525       // set_bit_for_region()). Note that we cannot rely on their
1526       // associated "starts humongous" region to have its bit set to
1527       // 1 since, due to the region chunking in the parallel region
1528       // iteration, a "continues humongous" region might be visited
1529       // before its associated "starts humongous".
1530       return false;
1531     }
1532 
1533     int failures = 0;
1534 
1535     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1536     // this region and set the corresponding bits in the expected region
1537     // and card bitmaps.
1538     bool res = _calc_cl.doHeapRegion(hr);
1539     assert(res == false, "should be continuing");
1540 
1541     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1542                     Mutex::_no_safepoint_check_flag);
1543 
1544     // Verify the marked bytes for this region.
1545     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1546     size_t act_marked_bytes = hr->next_marked_bytes();
1547 
1548     // We're not OK if expected marked bytes > actual marked bytes. It means
1549     // we missed accounting for some objects during the actual marking.
1550     if (exp_marked_bytes > act_marked_bytes) {
1551       if (_verbose) {
1552         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1553                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1554                                hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
1555       }
1556       failures += 1;
1557     }
1558 
1559     // Verify the bit, for this region, in the actual and expected
1560     // (which was just calculated) region bit maps.
1561     // We're not OK if the bit in the calculated expected region
1562     // bitmap is set and the bit in the actual region bitmap is not.
1563     BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1564 
1565     bool expected = _exp_region_bm->at(index);
1566     bool actual = _region_bm->at(index);
1567     if (expected && !actual) {
1568       if (_verbose) {
1569         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1570                                "expected: %s, actual: %s",
1571                                hr->hrs_index(),
1572                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1573       }
1574       failures += 1;
1575     }
1576 
1577     // Verify that the card bit maps for the cards spanned by the current
1578     // region match. We have an error if we have a set bit in the expected
1579     // bit map and the corresponding bit in the actual bitmap is not set.
1580 
1581     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1582     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1583 
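    // Note: end_idx is exclusive and, unlike the calculation above, is
    // not bumped when top is not card aligned, so a final, partially
    // covered card is not verified here.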
1584     for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
1585       expected = _exp_card_bm->at(i);
1586       actual = _card_bm->at(i);
1587 
1588       if (expected && !actual) {
1589         if (_verbose) {
1590           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1591                                  "expected: %s, actual: %s",
1592                                  hr->hrs_index(), i,
1593                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1594         }
1595         failures += 1;
1596       }
1597     }
1598 
1599     if (failures > 0 && _verbose)  {
1600       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1601                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1602                              HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
1603                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1604     }
1605 
1606     _failures += failures;
1607 
1608     // We could stop iteration over the heap when we
1609     // find the first violating region by returning true.
1610     return false;
1611   }
1612 };
1613 
1614 
1615 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1616 protected:
1617   G1CollectedHeap* _g1h;
1618   ConcurrentMark* _cm;
1619   BitMap* _actual_region_bm;
1620   BitMap* _actual_card_bm;
1621 
1622   uint    _n_workers;
1623 
1624   BitMap* _expected_region_bm;
1625   BitMap* _expected_card_bm;
1626 
1627   int  _failures;
1628   bool _verbose;
1629 
1630 public:
1631   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1632                             BitMap* region_bm, BitMap* card_bm,
1633                             BitMap* expected_region_bm, BitMap* expected_card_bm)
1634     : AbstractGangTask("G1 verify final counting"),
1635       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1636       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1637       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1638       _failures(0), _verbose(false),
1639       _n_workers(0) {
1640     assert(VerifyDuringGC, "don't call this otherwise");
1641 
1642     // Use the value already set as the number of active threads
1643     // in the call to run_task().
1644     if (G1CollectedHeap::use_parallel_gc_threads()) {
1645       assert(_g1h->workers()->active_workers() > 0,
1646              "Should have been previously set");
1647       _n_workers = _g1h->workers()->active_workers();
1648     } else {
1649       _n_workers = 1;
1650     }
1651 
1652     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1653     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1654 
1655     _verbose = _cm->verbose_medium();
1656   }
1657 
1658   void work(uint worker_id) {
1659     assert(worker_id < _n_workers, "invariant");
1660 
1661     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1662                                             _actual_region_bm, _actual_card_bm,
1663                                             _expected_region_bm,
1664                                             _expected_card_bm,
1665                                             _verbose);
1666 
1667     if (G1CollectedHeap::use_parallel_gc_threads()) {
1668       _g1h->heap_region_par_iterate_chunked(&verify_cl,
1669                                             worker_id,
1670                                             _n_workers,
1671                                             HeapRegion::VerifyCountClaimValue);
1672     } else {
1673       _g1h->heap_region_iterate(&verify_cl);
1674     }
1675 
1676     Atomic::add(verify_cl.failures(), &_failures);
1677   }
1678 
1679   int failures() const { return _failures; }
1680 };
1681 
1682 // Closure that finalizes the liveness counting data.
1683 // Used during the cleanup pause.
1684 // Sets the bits corresponding to the interval [NTAMS, top]
1685 // (which contains the implicitly live objects) in the
1686 // card liveness bitmap. Also sets the bit for each region,
1687 // containing live data, in the region liveness bitmap.
1688 
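// A sketch of the region layout these closures rely on:
//
//   bottom           ntams             top               end
//     |---- marked ----|-- implicitly ---|----- free ------|
//     |    objects     |  live objects   |     space       |
//
// Objects in [bottom, ntams) are live iff their bit is set on the
// marking bitmap; objects in [ntams, top) were allocated after marking
// started and are treated as implicitly live.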
1689 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1690  public:
1691   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1692                               BitMap* region_bm,
1693                               BitMap* card_bm) :
1694     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1695 
1696   bool doHeapRegion(HeapRegion* hr) {
1697 
1698     if (hr->continuesHumongous()) {
1699       // We will ignore these here and process them when their
1700       // associated "starts humongous" region is processed (see
1701       // set_bit_for_region()). Note that we cannot rely on their
1702       // associated "starts humongous" region to have its bit set to
1703       // 1 since, due to the region chunking in the parallel region
1704       // iteration, a "continues humongous" region might be visited
1705       // before its associated "starts humongous".
1706       return false;
1707     }
1708 
1709     HeapWord* ntams = hr->next_top_at_mark_start();
1710     HeapWord* top   = hr->top();
1711 
1712     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1713 
1714     // Mark the allocated-since-marking portion...
1715     if (ntams < top) {
1716       // This definitely means the region has live objects.
1717       set_bit_for_region(hr);
1718 
1719       // Now set the bits in the card bitmap for [ntams, top)
1720       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1721       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1722 
1723       // Note: if we're looking at the last region in the heap, top
1724       // could actually be just beyond the end of the heap; end_idx
1725       // will then correspond to a (non-existent) card that is also
1726       // just beyond the heap.
1727       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1728         // top is not card aligned - increment end_idx to cover
1729         // all the cards spanned by the range [ntams, top)
1730         end_idx += 1;
1731       }
1732 
1733       assert(end_idx <= _card_bm->size(),
1734              err_msg("oob: end_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1735                      end_idx, _card_bm->size()));
1736       assert(start_idx < _card_bm->size(),
1737              err_msg("oob: start_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1738                      start_idx, _card_bm->size()));
1739 
1740       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1741     }
1742 
1743     // Set the bit for the region if it contains live data
1744     if (hr->next_marked_bytes() > 0) {
1745       set_bit_for_region(hr);
1746     }
1747 
1748     return false;
1749   }
1750 };
1751 
1752 class G1ParFinalCountTask: public AbstractGangTask {
1753 protected:
1754   G1CollectedHeap* _g1h;
1755   ConcurrentMark* _cm;
1756   BitMap* _actual_region_bm;
1757   BitMap* _actual_card_bm;
1758 
1759   uint    _n_workers;
1760 
1761 public:
1762   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1763     : AbstractGangTask("G1 final counting"),
1764       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1765       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1766       _n_workers(0) {
1767     // Use the value already set as the number of active threads
1768     // in the call to run_task().
1769     if (G1CollectedHeap::use_parallel_gc_threads()) {
1770       assert(_g1h->workers()->active_workers() > 0,
1771              "Should have been previously set");
1772       _n_workers = _g1h->workers()->active_workers();
1773     } else {
1774       _n_workers = 1;
1775     }
1776   }
1777 
1778   void work(uint worker_id) {
1779     assert(worker_id < _n_workers, "invariant");
1780 
1781     FinalCountDataUpdateClosure final_update_cl(_g1h,
1782                                                 _actual_region_bm,
1783                                                 _actual_card_bm);
1784 
1785     if (G1CollectedHeap::use_parallel_gc_threads()) {
1786       _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1787                                             worker_id,
1788                                             _n_workers,
1789                                             HeapRegion::FinalCountClaimValue);
1790     } else {
1791       _g1h->heap_region_iterate(&final_update_cl);
1792     }
1793   }
1794 };
1795 
1796 class G1ParNoteEndTask;
1797 
1798 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1799   G1CollectedHeap* _g1;
1800   int _worker_num;
1801   size_t _max_live_bytes;
1802   uint _regions_claimed;
1803   size_t _freed_bytes;
1804   FreeRegionList* _local_cleanup_list;
1805   OldRegionSet* _old_proxy_set;
1806   HumongousRegionSet* _humongous_proxy_set;
1807   HRRSCleanupTask* _hrrs_cleanup_task;
1808   double _claimed_region_time;
1809   double _max_region_time;
1810 
1811 public:
1812   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1813                              int worker_num,
1814                              FreeRegionList* local_cleanup_list,
1815                              OldRegionSet* old_proxy_set,
1816                              HumongousRegionSet* humongous_proxy_set,
1817                              HRRSCleanupTask* hrrs_cleanup_task) :
1818     _g1(g1), _worker_num(worker_num),
1819     _max_live_bytes(0), _regions_claimed(0),
1820     _freed_bytes(0),
1821     _claimed_region_time(0.0), _max_region_time(0.0),
1822     _local_cleanup_list(local_cleanup_list),
1823     _old_proxy_set(old_proxy_set),
1824     _humongous_proxy_set(humongous_proxy_set),
1825     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1826 
1827   size_t freed_bytes() { return _freed_bytes; }
1828 
1829   bool doHeapRegion(HeapRegion *hr) {
1830     if (hr->continuesHumongous()) {
1831       return false;
1832     }
1833     // We use a claim value of zero here because all regions
1834     // were claimed with value 1 in the FinalCount task.
1835     _g1->reset_gc_time_stamps(hr);
1836     double start = os::elapsedTime();
1837     _regions_claimed++;
1838     hr->note_end_of_marking();
1839     _max_live_bytes += hr->max_live_bytes();
1840     _g1->free_region_if_empty(hr,
1841                               &_freed_bytes,
1842                               _local_cleanup_list,
1843                               _old_proxy_set,
1844                               _humongous_proxy_set,
1845                               _hrrs_cleanup_task,
1846                               true /* par */);
1847     double region_time = (os::elapsedTime() - start);
1848     _claimed_region_time += region_time;
1849     if (region_time > _max_region_time) {
1850       _max_region_time = region_time;
1851     }
1852     return false;
1853   }
1854 
1855   size_t max_live_bytes() { return _max_live_bytes; }
1856   uint regions_claimed() { return _regions_claimed; }
1857   double claimed_region_time_sec() { return _claimed_region_time; }
1858   double max_region_time_sec() { return _max_region_time; }
1859 };
1860 
1861 class G1ParNoteEndTask: public AbstractGangTask {
1862   friend class G1NoteEndOfConcMarkClosure;
1863 
1864 protected:
1865   G1CollectedHeap* _g1h;
1866   size_t _max_live_bytes;
1867   size_t _freed_bytes;
1868   FreeRegionList* _cleanup_list;
1869 
1870 public:
1871   G1ParNoteEndTask(G1CollectedHeap* g1h,
1872                    FreeRegionList* cleanup_list) :
1873     AbstractGangTask("G1 note end"), _g1h(g1h),
1874     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1875 
1876   void work(uint worker_id) {
1877     double start = os::elapsedTime();
1878     FreeRegionList local_cleanup_list("Local Cleanup List");
1879     OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
1880     HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
1881     HRRSCleanupTask hrrs_cleanup_task;
1882     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
1883                                            &old_proxy_set,
1884                                            &humongous_proxy_set,
1885                                            &hrrs_cleanup_task);
1886     if (G1CollectedHeap::use_parallel_gc_threads()) {
1887       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1888                                             _g1h->workers()->active_workers(),
1889                                             HeapRegion::NoteEndClaimValue);
1890     } else {
1891       _g1h->heap_region_iterate(&g1_note_end);
1892     }
1893     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1894 
1895     // Now update the lists
1896     _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
1897                                             NULL /* free_list */,
1898                                             &old_proxy_set,
1899                                             &humongous_proxy_set,
1900                                             true /* par */);
1901     {
1902       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1903       _max_live_bytes += g1_note_end.max_live_bytes();
1904       _freed_bytes += g1_note_end.freed_bytes();
1905 
1906       // If we iterate over the global cleanup list at the end of
1907       // cleanup to do this printing we will not guarantee to only
1908       // generate output for the newly-reclaimed regions (the list
1909       // might not be empty at the beginning of cleanup; we might
1910       // still be working on its previous contents). So we do the
1911       // printing here, before we append the new regions to the global
1912       // cleanup list.
1913 
1914       G1HRPrinter* hr_printer = _g1h->hr_printer();
1915       if (hr_printer->is_active()) {
1916         HeapRegionLinkedListIterator iter(&local_cleanup_list);
1917         while (iter.more_available()) {
1918           HeapRegion* hr = iter.get_next();
1919           hr_printer->cleanup(hr);
1920         }
1921       }
1922 
1923       _cleanup_list->add_as_tail(&local_cleanup_list);
1924       assert(local_cleanup_list.is_empty(), "post-condition");
1925 
1926       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1927     }
1928   }
1929   size_t max_live_bytes() { return _max_live_bytes; }
1930   size_t freed_bytes() { return _freed_bytes; }
1931 };
1932 
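// Gang task that scrubs the remembered sets: entries referring to
// regions/cards that the liveness bitmaps show to contain no live
// data can safely be removed.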
1933 class G1ParScrubRemSetTask: public AbstractGangTask {
1934 protected:
1935   G1RemSet* _g1rs;
1936   BitMap* _region_bm;
1937   BitMap* _card_bm;
1938 public:
1939   G1ParScrubRemSetTask(G1CollectedHeap* g1h,
1940                        BitMap* region_bm, BitMap* card_bm) :
1941     AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
1942     _region_bm(region_bm), _card_bm(card_bm) { }
1943 
1944   void work(uint worker_id) {
1945     if (G1CollectedHeap::use_parallel_gc_threads()) {
1946       _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
1947                        HeapRegion::ScrubRemSetClaimValue);
1948     } else {
1949       _g1rs->scrub(_region_bm, _card_bm);
1950     }
1951   }
1952 
1953 };
1954 
1955 void ConcurrentMark::cleanup() {
1956   // world is stopped at this checkpoint
1957   assert(SafepointSynchronize::is_at_safepoint(),
1958          "world should be stopped");
1959   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1960 
1961   // If a full collection has happened, we shouldn't do this.
1962   if (has_aborted()) {
1963     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1964     return;
1965   }
1966 
1967   HRSPhaseSetter x(HRSPhaseCleanup);
1968   g1h->verify_region_sets_optional();
1969 
1970   if (VerifyDuringGC) {
1971     HandleMark hm;  // handle scope
1972     Universe::heap()->prepare_for_verify();
1973     Universe::verify(VerifyOption_G1UsePrevMarking,
1974                      " VerifyDuringGC:(before)");
1975   }
1976 
1977   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1978   g1p->record_concurrent_mark_cleanup_start();
1979 
1980   double start = os::elapsedTime();
1981 
1982   HeapRegionRemSet::reset_for_cleanup_tasks();
1983 
1984   uint n_workers;
1985 
1986   // Do counting once more with the world stopped for good measure.
1987   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1988 
1989   if (G1CollectedHeap::use_parallel_gc_threads()) {
1990     assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
1991            "sanity check");
1992 
1993     g1h->set_par_threads();
1994     n_workers = g1h->n_par_threads();
1995     assert(g1h->n_par_threads() == n_workers,
1996            "Should not have been reset");
1997     g1h->workers()->run_task(&g1_par_count_task);
1998     // Done with the parallel phase so reset to 0.
1999     g1h->set_par_threads(0);
2000 
2001     assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
2002            "sanity check");
2003   } else {
2004     n_workers = 1;
2005     g1_par_count_task.work(0);
2006   }
2007 
2008   if (VerifyDuringGC) {
2009     // Verify that the counting data accumulated during marking matches
2010     // that calculated by walking the marking bitmap.
2011 
2012     // Bitmaps to hold expected values
2013     BitMap expected_region_bm(_region_bm.size(), false);
2014     BitMap expected_card_bm(_card_bm.size(), false);
2015 
2016     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2017                                                  &_region_bm,
2018                                                  &_card_bm,
2019                                                  &expected_region_bm,
2020                                                  &expected_card_bm);
2021 
2022     if (G1CollectedHeap::use_parallel_gc_threads()) {
2023       g1h->set_par_threads((int)n_workers);
2024       g1h->workers()->run_task(&g1_par_verify_task);
2025       // Done with the parallel phase so reset to 0.
2026       g1h->set_par_threads(0);
2027 
2028       assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2029              "sanity check");
2030     } else {
2031       g1_par_verify_task.work(0);
2032     }
2033 
2034     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2035   }
2036 
2037   size_t start_used_bytes = g1h->used();
2038   g1h->set_marking_complete();
2039 
2040   double count_end = os::elapsedTime();
2041   double this_final_counting_time = (count_end - start);
2042   _total_counting_time += this_final_counting_time;
2043 
2044   if (G1PrintRegionLivenessInfo) {
2045     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2046     _g1h->heap_region_iterate(&cl);
2047   }
2048 
2049   // Install newly created mark bitMap as "prev".
2050   swapMarkBitMaps();
2051 
2052   g1h->reset_gc_time_stamp();
2053 
2054   // Note end of marking in all heap regions.
2055   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2056   if (G1CollectedHeap::use_parallel_gc_threads()) {
2057     g1h->set_par_threads((int)n_workers);
2058     g1h->workers()->run_task(&g1_par_note_end_task);
2059     g1h->set_par_threads(0);
2060 
2061     assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2062            "sanity check");
2063   } else {
2064     g1_par_note_end_task.work(0);
2065   }
2066   g1h->check_gc_time_stamps();
2067 
2068   if (!cleanup_list_is_empty()) {
2069     // The cleanup list is not empty, so we'll have to process it
2070     // concurrently. Notify anyone else that might be wanting free
2071     // regions that there will be more free regions coming soon.
2072     g1h->set_free_regions_coming();
2073   }
2074 
2075   // If enabled, scrub the rem sets before the record_concurrent_mark_cleanup_end()
2076   // call below, since scrubbing affects the metric by which we sort the heap regions.
2077   if (G1ScrubRemSets) {
2078     double rs_scrub_start = os::elapsedTime();
2079     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2080     if (G1CollectedHeap::use_parallel_gc_threads()) {
2081       g1h->set_par_threads((int)n_workers);
2082       g1h->workers()->run_task(&g1_par_scrub_rs_task);
2083       g1h->set_par_threads(0);
2084 
2085       assert(g1h->check_heap_region_claim_values(
2086                                             HeapRegion::ScrubRemSetClaimValue),
2087              "sanity check");
2088     } else {
2089       g1_par_scrub_rs_task.work(0);
2090     }
2091 
2092     double rs_scrub_end = os::elapsedTime();
2093     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2094     _total_rs_scrub_time += this_rs_scrub_time;
2095   }
2096 
2097   // this will also free any regions totally full of garbage objects,
2098   // and sort the regions.
2099   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2100 
2101   // Statistics.
2102   double end = os::elapsedTime();
2103   _cleanup_times.add((end - start) * 1000.0);
2104 
2105   if (G1Log::fine()) {
2106     g1h->print_size_transition(gclog_or_tty,
2107                                start_used_bytes,
2108                                g1h->used(),
2109                                g1h->capacity());
2110   }
2111 
2112   // Clean up will have freed any regions completely full of garbage.
2113   // Update the soft reference policy with the new heap occupancy.
2114   Universe::update_heap_info_at_gc();
2115 
2116   // We need to make this be a "collection" so any collection pause that
2117   // races with it goes around and waits for completeCleanup to finish.
2118   g1h->increment_total_collections();
2119 
2120   // We reclaimed old regions so we should calculate the sizes to make
2121   // sure we update the old gen/space data.
2122   g1h->g1mm()->update_sizes();
2123 
2124   if (VerifyDuringGC) {
2125     HandleMark hm;  // handle scope
2126     Universe::heap()->prepare_for_verify();
2127     Universe::verify(VerifyOption_G1UsePrevMarking,
2128                      " VerifyDuringGC:(after)");
2129   }
2130 
2131   g1h->verify_region_sets_optional();
2132 }
2133 
2134 void ConcurrentMark::completeCleanup() {
2135   if (has_aborted()) return;
2136 
2137   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2138 
2139   _cleanup_list.verify_optional();
2140   FreeRegionList tmp_free_list("Tmp Free List");
2141 
2142   if (G1ConcRegionFreeingVerbose) {
2143     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2144                            "cleanup list has %u entries",
2145                            _cleanup_list.length());
2146   }
2147 
2148   // No one else should be accessing the _cleanup_list at this point,
2149   // so it's not necessary to take any locks.
2150   while (!_cleanup_list.is_empty()) {
2151     HeapRegion* hr = _cleanup_list.remove_head();
2152     assert(hr != NULL, "the list was not empty");
2153     hr->par_clear();
2154     tmp_free_list.add_as_tail(hr);
2155 
2156     // Instead of adding one region at a time to the secondary_free_list,
2157     // we accumulate them in the local list and move them a few at a
2158     // time. This also cuts down on the number of notify_all() calls
2159     // we do during this process. We'll also append the local list when
2160     // _cleanup_list is empty (which means we just removed the last
2161     // region from the _cleanup_list).
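    // (With the default value of G1SecondaryFreeListAppendLength -
    // 5 at the time of writing - regions are handed over in batches
    // of five.)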
2162     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2163         _cleanup_list.is_empty()) {
2164       if (G1ConcRegionFreeingVerbose) {
2165         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2166                                "appending %u entries to the secondary_free_list, "
2167                                "cleanup list still has %u entries",
2168                                tmp_free_list.length(),
2169                                _cleanup_list.length());
2170       }
2171 
2172       {
2173         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2174         g1h->secondary_free_list_add_as_tail(&tmp_free_list);
2175         SecondaryFreeList_lock->notify_all();
2176       }
2177 
2178       if (G1StressConcRegionFreeing) {
2179         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2180           os::sleep(Thread::current(), (jlong) 1, false);
2181         }
2182       }
2183     }
2184   }
2185   assert(tmp_free_list.is_empty(), "post-condition");
2186 }
2187 
2188 // Supporting Object and Oop closures for reference discovery
2189 // and processing during marking.
2190 
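// An object is judged alive here if it lies outside the G1 reserved
// heap (and is therefore not managed by this collector) or if it is
// not "ill", i.e., not known to be dead given the current marking
// information.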
2191 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2192   HeapWord* addr = (HeapWord*)obj;
2193   return addr != NULL &&
2194          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2195 }
2196 
2197 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2198 // Uses the CMTask associated with a worker thread (for serial reference
2199 // processing the CMTask for worker 0 is used) to preserve (mark) and
2200 // trace referent objects.
2201 //
2202 // Using the CMTask and embedded local queues avoids having the worker
2203 // threads operating on the global mark stack. This reduces the risk
2204 // of overflowing the stack - which we would rather avoid at this late
2205 // stage. Also, using the tasks' local queues removes the potential
2206 // for the workers to interfere with one another, as could occur if
2207 // they were operating on the global stack.
2208 
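// The two closures are typically handed to the reference processor as
// the keep-alive / complete-GC pair, roughly as follows (see
// ConcurrentMark::weakRefsWork() below for the actual call site):
//
//   G1CMKeepAliveAndDrainClosure keep_alive(cm, task, is_serial);
//   G1CMDrainMarkingStackClosure drain(cm, task, is_serial);
//   rp->process_discovered_references(&is_alive, &keep_alive,
//                                     &drain, executor);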
2209 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2210   ConcurrentMark* _cm;
2211   CMTask*         _task;
2212   int             _ref_counter_limit;
2213   int             _ref_counter;
2214   bool            _is_serial;
2215  public:
2216   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2217     _cm(cm), _task(task), _is_serial(is_serial),
2218     _ref_counter_limit(G1RefProcDrainInterval) {
2219     assert(_ref_counter_limit > 0, "sanity");
2220     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2221     _ref_counter = _ref_counter_limit;
2222   }
2223 
2224   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2225   virtual void do_oop(      oop* p) { do_oop_work(p); }
2226 
2227   template <class T> void do_oop_work(T* p) {
2228     if (!_cm->has_overflown()) {
2229       oop obj = oopDesc::load_decode_heap_oop(p);
2230       if (_cm->verbose_high()) {
2231         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2232                                "*"PTR_FORMAT" = "PTR_FORMAT,
2233                                _task->worker_id(), p, (void*) obj);
2234       }
2235 
2236       _task->deal_with_reference(obj);
2237       _ref_counter--;
2238 
2239       if (_ref_counter == 0) {
2240         // We have dealt with _ref_counter_limit references, pushing them
2241         // and objects reachable from them on to the local stack (and
2242         // possibly the global stack). Call CMTask::do_marking_step() to
2243         // process these entries.
2244         //
2245         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2246         // there's nothing more to do (i.e. we're done with the entries that
2247         // were pushed as a result of the CMTask::deal_with_reference() calls
2248         // above) or we overflow.
2249         //
2250         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2251         // flag while there may still be some work to do. (See the comment at
2252         // the beginning of CMTask::do_marking_step() for those conditions -
2253         // one of which is reaching the specified time target.) It is only
2254         // when CMTask::do_marking_step() returns without setting the
2255         // has_aborted() flag that the marking step has completed.
2256         do {
2257           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2258           _task->do_marking_step(mark_step_duration_ms,
2259                                  false      /* do_termination */,
2260                                  _is_serial);
2261         } while (_task->has_aborted() && !_cm->has_overflown());
2262         _ref_counter = _ref_counter_limit;
2263       }
2264     } else {
2265       if (_cm->verbose_high()) {
2266          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2267       }
2268     }
2269   }
2270 };
2271 
2272 // 'Drain' oop closure used by both serial and parallel reference processing.
2273 // Uses the CMTask associated with a given worker thread (for serial
2274 // reference processing the CMTask for worker 0 is used). Calls the
2275 // do_marking_step routine, with an unbelievably large timeout value,
2276 // to drain the marking data structures of the remaining entries
2277 // added by the 'keep alive' oop closure above.
2278 
2279 class G1CMDrainMarkingStackClosure: public VoidClosure {
2280   ConcurrentMark* _cm;
2281   CMTask*         _task;
2282   bool            _is_serial;
2283  public:
2284   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2285     _cm(cm), _task(task), _is_serial(is_serial) {
2286     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2287   }
2288 
2289   void do_void() {
2290     do {
2291       if (_cm->verbose_high()) {
2292         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2293                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2294       }
2295 
2296       // We call CMTask::do_marking_step() to completely drain the local
2297       // and global marking stacks of entries pushed by the 'keep alive'
2298       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2299       //
2300       // CMTask::do_marking_step() is called in a loop, which we'll exit
2301       // if there's nothing more to do (i.e. we've completely drained the
2302       // entries that were pushed as a result of applying the 'keep alive'
2303       // closure to the entries on the discovered ref lists) or we overflow
2304       // the global marking stack.
2305       //
2306       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2307       // flag while there may still be some work to do. (See the comment at
2308       // the beginning of CMTask::do_marking_step() for those conditions -
2309       // one of which is reaching the specified time target.) It is only
2310       // when CMTask::do_marking_step() returns without setting the
2311       // has_aborted() flag that the marking step has completed.
2312 
2313       _task->do_marking_step(1000000000.0 /* something very large */,
2314                              true         /* do_termination */,
2315                              _is_serial);
2316     } while (_task->has_aborted() && !_cm->has_overflown());
2317   }
2318 };
2319 
2320 // Implementation of AbstractRefProcTaskExecutor for parallel
2321 // reference processing at the end of G1 concurrent marking
2322 
2323 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2324 private:
2325   G1CollectedHeap* _g1h;
2326   ConcurrentMark*  _cm;
2327   WorkGang*        _workers;
2328   int              _active_workers;
2329 
2330 public:
2331   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2332                         ConcurrentMark* cm,
2333                         WorkGang* workers,
2334                         int n_workers) :
2335     _g1h(g1h), _cm(cm),
2336     _workers(workers), _active_workers(n_workers) { }
2337 
2338   // Executes the given task using concurrent marking worker threads.
2339   virtual void execute(ProcessTask& task);
2340   virtual void execute(EnqueueTask& task);
2341 };
2342 
2343 class G1CMRefProcTaskProxy: public AbstractGangTask {
2344   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2345   ProcessTask&     _proc_task;
2346   G1CollectedHeap* _g1h;
2347   ConcurrentMark*  _cm;
2348 
2349 public:
2350   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2351                      G1CollectedHeap* g1h,
2352                      ConcurrentMark* cm) :
2353     AbstractGangTask("Process reference objects in parallel"),
2354     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2355     ReferenceProcessor* rp = _g1h->ref_processor_cm();
2356     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2357   }
2358 
2359   virtual void work(uint worker_id) {
2360     CMTask* task = _cm->task(worker_id);
2361     G1CMIsAliveClosure g1_is_alive(_g1h);
2362     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2363     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2364 
2365     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2366   }
2367 };
2368 
2369 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2370   assert(_workers != NULL, "Need parallel worker threads.");
2371   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2372 
2373   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2374 
2375   // We need to reset the concurrency level before each
2376   // proxy task execution, so that the termination protocol
2377   // and overflow handling in CMTask::do_marking_step() knows
2378   // how many workers to wait for.
2379   _cm->set_concurrency(_active_workers);
2380   _g1h->set_par_threads(_active_workers);
2381   _workers->run_task(&proc_task_proxy);
2382   _g1h->set_par_threads(0);
2383 }
2384 
2385 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2386   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2387   EnqueueTask& _enq_task;
2388 
2389 public:
2390   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2391     AbstractGangTask("Enqueue reference objects in parallel"),
2392     _enq_task(enq_task) { }
2393 
2394   virtual void work(uint worker_id) {
2395     _enq_task.work(worker_id);
2396   }
2397 };
2398 
2399 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2400   assert(_workers != NULL, "Need parallel worker threads.");
2401   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2402 
2403   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2404 
2405   // Not strictly necessary but...
2406   //
2407   // We need to reset the concurrency level before each
2408   // proxy task execution, so that the termination protocol
2409   // and overflow handling in CMTask::do_marking_step() knows
2410   // how many workers to wait for.
2411   _cm->set_concurrency(_active_workers);
2412   _g1h->set_par_threads(_active_workers);
2413   _workers->run_task(&enq_task_proxy);
2414   _g1h->set_par_threads(0);
2415 }
2416 
2417 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2418   if (has_overflown()) {
2419     // Skip processing the discovered references if we have
2420     // overflown the global marking stack. Reference objects
2421     // only get discovered once so it is OK to not
2422     // de-populate the discovered reference lists. We could have,
2423     // but the only benefit would be that, when marking restarts,
2424     // fewer reference objects are discovered.
2425     return;
2426   }
2427 
2428   ResourceMark rm;
2429   HandleMark   hm;
2430 
2431   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2432 
2433   // Is alive closure.
2434   G1CMIsAliveClosure g1_is_alive(g1h);
2435 
2436   // Inner scope to exclude the cleaning of the string and symbol
2437   // tables from the displayed time.
2438   {
2439     if (G1Log::finer()) {
2440       gclog_or_tty->put(' ');
2441     }
2442     TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
2443 
2444     ReferenceProcessor* rp = g1h->ref_processor_cm();
2445 
2446     // See the comment in G1CollectedHeap::ref_processing_init()
2447     // about how reference processing currently works in G1.
2448 
2449     // Set the soft reference policy
2450     rp->setup_policy(clear_all_soft_refs);
2451     assert(_markStack.isEmpty(), "mark stack should be empty");
2452 
2453     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2454     // in serial reference processing. Note these closures are also
2455     // used for serially processing (by the current thread) the
2456     // JNI references during parallel reference processing.
2457     //
2458     // These closures do not need to synchronize with the worker
2459     // threads involved in parallel reference processing as these
2460     // instances are executed serially by the current thread (i.e.,
2461     // when reference processing is not multi-threaded, it is
2462     // performed by the current thread rather than by a gang worker).
2463     //
2464     // The gang tasks involved in parallel reference processing create
2465     // their own instances of these closures, which do their own
2466     // synchronization among themselves.
2467     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2468     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2469 
2470     // We need at least one active thread. If reference processing
2471     // is not multi-threaded we use the current (VMThread) thread,
2472     // otherwise we use the work gang from the G1CollectedHeap and
2473     // we utilize all the worker threads we can.
2474     bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2475     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2476     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2477 
2478     // Parallel processing task executor.
2479     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2480                                               g1h->workers(), active_workers);
2481     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2482 
2483     // Set the concurrency level. The phase was already set prior to
2484     // executing the remark task.
2485     set_concurrency(active_workers);
2486 
2487     // Set the degree of MT processing here.  If the discovery was done MT,
2488     // the number of threads involved during discovery could differ from
2489     // the number of active workers.  This is OK as long as the discovered
2490     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2491     rp->set_active_mt_degree(active_workers);
2492 
2493     // Process the weak references.
2494     rp->process_discovered_references(&g1_is_alive,
2495                                       &g1_keep_alive,
2496                                       &g1_drain_mark_stack,
2497                                       executor);
2498 
2499     // The do_oop work routines of the keep_alive and drain_marking_stack
2500     // oop closures will set the has_overflown flag if we overflow the
2501     // global marking stack.
2502 
2503     assert(_markStack.overflow() || _markStack.isEmpty(),
2504             "mark stack should be empty (unless it overflowed)");
2505 
2506     if (_markStack.overflow()) {
2507       // This should have been done already when we tried to push an
2508       // entry on to the global mark stack. But let's do it again.
2509       set_has_overflown();
2510     }
2511 
2512     assert(rp->num_q() == active_workers, "Ref queue count should match active workers");
2513 
2514     rp->enqueue_discovered_references(executor);
2515 
2516     rp->verify_no_references_recorded();
2517     assert(!rp->discovery_enabled(), "Post condition");
2518   }
2519 
2520   // Now clean up stale oops in StringTable
2521   StringTable::unlink(&g1_is_alive);
2522   // Clean up unreferenced symbols in symbol table.
2523   SymbolTable::unlink();
2524 }
2525 
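// Called during cleanup: the "next" bitmap, which marking has just
// completed, becomes the new "prev" bitmap, while the old "prev"
// bitmap will be cleared and reused as the "next" bitmap for the
// following marking cycle.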
2526 void ConcurrentMark::swapMarkBitMaps() {
2527   CMBitMapRO* temp = _prevMarkBitMap;
2528   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2529   _nextMarkBitMap  = (CMBitMap*)  temp;
2530 }
2531 
2532 class CMRemarkTask: public AbstractGangTask {
2533 private:
2534   ConcurrentMark* _cm;
2535   bool            _is_serial;
2536 public:
2537   void work(uint worker_id) {
2538     // Since all available tasks are actually started, we should
2539     // only proceed if we're supposed to be active.
2540     if (worker_id < _cm->active_tasks()) {
2541       CMTask* task = _cm->task(worker_id);
2542       task->record_start_time();
2543       do {
2544         task->do_marking_step(1000000000.0 /* something very large */,
2545                               true         /* do_termination       */,
2546                               _is_serial);
2547       } while (task->has_aborted() && !_cm->has_overflown());
2548       // If we overflow, then we do not want to restart. We instead
2549       // want to abort remark and do concurrent marking again.
2550       task->record_end_time();
2551     }
2552   }
2553 
2554   CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2555     AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2556     _cm->terminator()->reset_for_reuse(active_workers);
2557   }
2558 };
2559 
2560 void ConcurrentMark::checkpointRootsFinalWork() {
2561   ResourceMark rm;
2562   HandleMark   hm;
2563   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2564 
2565   g1h->ensure_parsability(false);
2566 
2567   if (G1CollectedHeap::use_parallel_gc_threads()) {
2568     G1CollectedHeap::StrongRootsScope srs(g1h);
2569     // this is remark, so we'll use up all active threads
2570     uint active_workers = g1h->workers()->active_workers();
2571     if (active_workers == 0) {
2572       assert(active_workers > 0, "Should have been set earlier");
2573       active_workers = (uint) ParallelGCThreads;
2574       g1h->workers()->set_active_workers(active_workers);
2575     }
2576     set_concurrency_and_phase(active_workers, false /* concurrent */);
2577     // Leave _parallel_marking_threads at its
2578     // value originally calculated in the ConcurrentMark
2579     // constructor and pass values of the active workers
2580     // through the gang in the task.
2581 
2582     CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2583     // We will start all available threads, even if we decide that the
2584     // active_workers will be fewer. The extra ones will just bail out
2585     // immediately.
2586     g1h->set_par_threads(active_workers);
2587     g1h->workers()->run_task(&remarkTask);
2588     g1h->set_par_threads(0);
2589   } else {
2590     G1CollectedHeap::StrongRootsScope srs(g1h);
2591     uint active_workers = 1;
2592     set_concurrency_and_phase(active_workers, false /* concurrent */);
2593 
2594     // Note - if there's no work gang then the VMThread will be
2595     // the thread to execute the remark - serially. We have
2596     // to pass true for the is_serial parameter so that
2597     // CMTask::do_marking_step() doesn't enter the sync
2598     // barriers in the event of an overflow. Doing so will
2599     // cause an assert that the current thread is not a
2600     // concurrent GC thread.
2601     CMRemarkTask remarkTask(this, active_workers, true /* is_serial */);
2602     remarkTask.work(0);
2603   }
2604   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2605   guarantee(has_overflown() ||
2606             satb_mq_set.completed_buffers_num() == 0,
2607             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2608                     BOOL_TO_STR(has_overflown()),
2609                     satb_mq_set.completed_buffers_num()));
2610 
2611   print_stats();
2612 }
2613 
2614 #ifndef PRODUCT
2615 
2616 class PrintReachableOopClosure: public OopClosure {
2617 private:
2618   G1CollectedHeap* _g1h;
2619   outputStream*    _out;
2620   VerifyOption     _vo;
2621   bool             _all;
2622 
2623 public:
2624   PrintReachableOopClosure(outputStream* out,
2625                            VerifyOption  vo,
2626                            bool          all) :
2627     _g1h(G1CollectedHeap::heap()),
2628     _out(out), _vo(vo), _all(all) { }
2629 
2630   void do_oop(narrowOop* p) { do_oop_work(p); }
2631   void do_oop(      oop* p) { do_oop_work(p); }
2632 
2633   template <class T> void do_oop_work(T* p) {
2634     oop         obj = oopDesc::load_decode_heap_oop(p);
2635     const char* str = NULL;
2636     const char* str2 = "";
2637 
2638     if (obj == NULL) {
2639       str = "";
2640     } else if (!_g1h->is_in_g1_reserved(obj)) {
2641       str = " O";
2642     } else {
2643       HeapRegion* hr  = _g1h->heap_region_containing(obj);
2644       guarantee(hr != NULL, "invariant");
2645       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2646       bool marked = _g1h->is_marked(obj, _vo);
2647 
2648       if (over_tams) {
2649         str = " >";
2650         if (marked) {
2651           str2 = " AND MARKED";
2652         }
2653       } else if (marked) {
2654         str = " M";
2655       } else {
2656         str = " NOT";
2657       }
2658     }
2659 
2660     _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
2661                    p, (void*) obj, str, str2);
2662   }
2663 };
2664 
2665 class PrintReachableObjectClosure : public ObjectClosure {
2666 private:
2667   G1CollectedHeap* _g1h;
2668   outputStream*    _out;
2669   VerifyOption     _vo;
2670   bool             _all;
2671   HeapRegion*      _hr;
2672 
2673 public:
2674   PrintReachableObjectClosure(outputStream* out,
2675                               VerifyOption  vo,
2676                               bool          all,
2677                               HeapRegion*   hr) :
2678     _g1h(G1CollectedHeap::heap()),
2679     _out(out), _vo(vo), _all(all), _hr(hr) { }
2680 
2681   void do_object(oop o) {
2682     bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2683     bool marked = _g1h->is_marked(o, _vo);
2684     bool print_it = _all || over_tams || marked;
2685 
2686     if (print_it) {
2687       _out->print_cr(" "PTR_FORMAT"%s",
2688                      o, (over_tams) ? " >" : (marked) ? " M" : "");
2689       PrintReachableOopClosure oopCl(_out, _vo, _all);
2690       o->oop_iterate_no_header(&oopCl);
2691     }
2692   }
2693 };
2694 
2695 class PrintReachableRegionClosure : public HeapRegionClosure {
2696 private:
2697   G1CollectedHeap* _g1h;
2698   outputStream*    _out;
2699   VerifyOption     _vo;
2700   bool             _all;
2701 
2702 public:
2703   bool doHeapRegion(HeapRegion* hr) {
2704     HeapWord* b = hr->bottom();
2705     HeapWord* e = hr->end();
2706     HeapWord* t = hr->top();
2707     HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2708     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2709                    "TAMS: "PTR_FORMAT, b, e, t, p);
2710     _out->cr();
2711 
2712     HeapWord* from = b;
2713     HeapWord* to   = t;
2714 
2715     if (to > from) {
2716       _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
2717       _out->cr();
2718       PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2719       hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2720       _out->cr();
2721     }
2722 
2723     return false;
2724   }
2725 
2726   PrintReachableRegionClosure(outputStream* out,
2727                               VerifyOption  vo,
2728                               bool          all) :
2729     _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2730 };
2731 
2732 void ConcurrentMark::print_reachable(const char* str,
2733                                      VerifyOption vo,
2734                                      bool all) {
2735   gclog_or_tty->cr();
2736   gclog_or_tty->print_cr("== Doing heap dump... ");
2737 
2738   if (G1PrintReachableBaseFile == NULL) {
2739     gclog_or_tty->print_cr("  #### error: no base file defined");
2740     return;
2741   }
2742 
2743   if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2744       (JVM_MAXPATHLEN - 1)) {
2745     gclog_or_tty->print_cr("  #### error: file name too long");
2746     return;
2747   }
2748 
2749   char file_name[JVM_MAXPATHLEN];
2750   sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2751   gclog_or_tty->print_cr("  dumping to file %s", file_name);
2752 
2753   fileStream fout(file_name);
2754   if (!fout.is_open()) {
2755     gclog_or_tty->print_cr("  #### error: could not open file");
2756     return;
2757   }
2758 
2759   outputStream* out = &fout;
2760   out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2761   out->cr();
2762 
2763   out->print_cr("--- ITERATING OVER REGIONS");
2764   out->cr();
2765   PrintReachableRegionClosure rcl(out, vo, all);
2766   _g1h->heap_region_iterate(&rcl);
2767   out->cr();
2768 
2769   gclog_or_tty->print_cr("  done");
2770   gclog_or_tty->flush();
2771 }
2772 
2773 #endif // PRODUCT
2774 
2775 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2776   // Note we are overriding the read-only view of the prev map here, via
2777   // the cast.
2778   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2779 }
2780 
2781 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2782   _nextMarkBitMap->clearRange(mr);
2783 }
2784 
2785 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
2786   clearRangePrevBitmap(mr);
2787   clearRangeNextBitmap(mr);
2788 }
2789 
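// Claims the next region to be scanned by atomically moving the
// global finger past it. Returns the claimed region if there is
// anything in it to scan (its NTAMS is above bottom); returns NULL
// if the claimed region is empty, in which case the caller is
// expected to call claim_region() again.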
2790 HeapRegion*
2791 ConcurrentMark::claim_region(uint worker_id) {
2792   // "checkpoint" the finger
2793   HeapWord* finger = _finger;
2794 
2795   // _heap_end will not change underneath our feet; it only changes at
2796   // yield points.
2797   while (finger < _heap_end) {
2798     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2799 
2800     // Note on how this code handles humongous regions. In the
2801     // normal case the finger will reach the start of a "starts
2802     // humongous" (SH) region. Its end will either be the end of the
2803     // last "continues humongous" (CH) region in the sequence, or the
2804     // standard end of the SH region (if the SH is the only region in
2805     // the sequence). That way claim_region() will skip over the CH
2806     // regions. However, there is a subtle race between a CM thread
2807     // executing this method and a mutator thread doing a humongous
2808     // object allocation. The two are not mutually exclusive as the CM
2809     // thread does not need to hold the Heap_lock when it gets
2810     // here. So there is a chance that claim_region() will come across
    // a free region that's in the process of becoming an SH or a CH
    // region. In the former case, it will either
    //   a) Miss the update to the region's end, in which case it will
    //      visit every subsequent CH region, find their bitmaps
    //      empty, and do nothing, or
    //   b) Observe the update of the region's end (in which case
    //      it will skip the subsequent CH regions).
2818     // If it comes across a region that suddenly becomes CH, the
2819     // scenario will be similar to b). So, the race between
2820     // claim_region() and a humongous object allocation might force us
    // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
2823     HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
2824     HeapWord*   bottom        = curr_region->bottom();
2825     HeapWord*   end           = curr_region->end();
2826     HeapWord*   limit         = curr_region->next_top_at_mark_start();
2827 
2828     if (verbose_low()) {
2829       gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2830                              "["PTR_FORMAT", "PTR_FORMAT"), "
2831                              "limit = "PTR_FORMAT,
2832                              worker_id, curr_region, bottom, end, limit);
2833     }
2834 
2835     // Is the gap between reading the finger and doing the CAS too long?
2836     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2837     if (res == finger) {
2838       // we succeeded
2839 
      // notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further
2842       assert(_finger >= end, "the finger should have moved forward");
2843 
2844       if (verbose_low()) {
2845         gclog_or_tty->print_cr("[%u] we were successful with region = "
2846                                PTR_FORMAT, worker_id, curr_region);
2847       }
2848 
2849       if (limit > bottom) {
2850         if (verbose_low()) {
2851           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2852                                  "returning it ", worker_id, curr_region);
2853         }
2854         return curr_region;
2855       } else {
2856         assert(limit == bottom,
2857                "the region limit should be at bottom");
2858         if (verbose_low()) {
2859           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2860                                  "returning NULL", worker_id, curr_region);
2861         }
2862         // we return NULL and the caller should try calling
2863         // claim_region() again.
2864         return NULL;
2865       }
2866     } else {
2867       assert(_finger > finger, "the finger should have moved forward");
2868       if (verbose_low()) {
2869         gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2870                                "global finger = "PTR_FORMAT", "
2871                                "our finger = "PTR_FORMAT,
2872                                worker_id, _finger, finger);
2873       }
2874 
2875       // read it again
2876       finger = _finger;
2877     }
2878   }
2879 
2880   return NULL;
2881 }
2882 
2883 #ifndef PRODUCT
2884 enum VerifyNoCSetOopsPhase {
2885   VerifyNoCSetOopsStack,
2886   VerifyNoCSetOopsQueues,
2887   VerifyNoCSetOopsSATBCompleted,
2888   VerifyNoCSetOopsSATBThread
2889 };
2890 
2891 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
2892 private:
2893   G1CollectedHeap* _g1h;
2894   VerifyNoCSetOopsPhase _phase;
2895   int _info;
2896 
2897   const char* phase_str() {
2898     switch (_phase) {
2899     case VerifyNoCSetOopsStack:         return "Stack";
2900     case VerifyNoCSetOopsQueues:        return "Queue";
2901     case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
2902     case VerifyNoCSetOopsSATBThread:    return "Thread SATB Buffers";
2903     default:                            ShouldNotReachHere();
2904     }
2905     return NULL;
2906   }
2907 
2908   void do_object_work(oop obj) {
2909     guarantee(!_g1h->obj_in_cs(obj),
2910               err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
2911                       (void*) obj, phase_str(), _info));
2912   }
2913 
2914 public:
2915   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2916 
2917   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
2918     _phase = phase;
2919     _info = info;
2920   }
2921 
2922   virtual void do_oop(oop* p) {
2923     oop obj = oopDesc::load_decode_heap_oop(p);
2924     do_object_work(obj);
2925   }
2926 
2927   virtual void do_oop(narrowOop* p) {
2928     // We should not come across narrow oops while scanning marking
2929     // stacks and SATB buffers.
2930     ShouldNotReachHere();
2931   }
2932 
2933   virtual void do_object(oop obj) {
2934     do_object_work(obj);
2935   }
2936 };
2937 
2938 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
2939                                          bool verify_enqueued_buffers,
2940                                          bool verify_thread_buffers,
2941                                          bool verify_fingers) {
2942   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2943   if (!G1CollectedHeap::heap()->mark_in_progress()) {
2944     return;
2945   }
2946 
2947   VerifyNoCSetOopsClosure cl;
2948 
2949   if (verify_stacks) {
2950     // Verify entries on the global mark stack
2951     cl.set_phase(VerifyNoCSetOopsStack);
2952     _markStack.oops_do(&cl);
2953 
2954     // Verify entries on the task queues
2955     for (uint i = 0; i < _max_worker_id; i += 1) {
2956       cl.set_phase(VerifyNoCSetOopsQueues, i);
2957       CMTaskQueue* queue = _task_queues->queue(i);
2958       queue->oops_do(&cl);
2959     }
2960   }
2961 
2962   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
2963 
2964   // Verify entries on the enqueued SATB buffers
2965   if (verify_enqueued_buffers) {
2966     cl.set_phase(VerifyNoCSetOopsSATBCompleted);
2967     satb_qs.iterate_completed_buffers_read_only(&cl);
2968   }
2969 
2970   // Verify entries on the per-thread SATB buffers
2971   if (verify_thread_buffers) {
2972     cl.set_phase(VerifyNoCSetOopsSATBThread);
2973     satb_qs.iterate_thread_buffers_read_only(&cl);
2974   }
2975 
2976   if (verify_fingers) {
2977     // Verify the global finger
2978     HeapWord* global_finger = finger();
2979     if (global_finger != NULL && global_finger < _heap_end) {
2980       // The global finger always points to a heap region boundary. We
2981       // use heap_region_containing_raw() to get the containing region
2982       // given that the global finger could be pointing to a free region
2983       // which subsequently becomes continues humongous. If that
2984       // happens, heap_region_containing() will return the bottom of the
2985       // corresponding starts humongous region and the check below will
2986       // not hold any more.
2987       HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2988       guarantee(global_finger == global_hr->bottom(),
2989                 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
2990                         global_finger, HR_FORMAT_PARAMS(global_hr)));
2991     }
2992 
2993     // Verify the task fingers
2994     assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2995     for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
2996       CMTask* task = _tasks[i];
2997       HeapWord* task_finger = task->finger();
2998       if (task_finger != NULL && task_finger < _heap_end) {
2999         // See above note on the global finger verification.
3000         HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3001         guarantee(task_finger == task_hr->bottom() ||
3002                   !task_hr->in_collection_set(),
3003                   err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
3004                           task_finger, HR_FORMAT_PARAMS(task_hr)));
3005       }
3006     }
3007   }
3008 }
3009 #endif // PRODUCT
3010 
3011 // Aggregate the counting data that was constructed concurrently
3012 // with marking.
3013 class AggregateCountDataHRClosure: public HeapRegionClosure {
3014   G1CollectedHeap* _g1h;
3015   ConcurrentMark* _cm;
3016   CardTableModRefBS* _ct_bs;
3017   BitMap* _cm_card_bm;
3018   uint _max_worker_id;
3019 
3020  public:
3021   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3022                               BitMap* cm_card_bm,
3023                               uint max_worker_id) :
3024     _g1h(g1h), _cm(g1h->concurrent_mark()),
3025     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3026     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3027 
3028   bool doHeapRegion(HeapRegion* hr) {
3029     if (hr->continuesHumongous()) {
3030       // We will ignore these here and process them when their
3031       // associated "starts humongous" region is processed.
      // Note that we cannot rely on their associated
      // "starts humongous" region to have its bit set to 1
3034       // since, due to the region chunking in the parallel region
3035       // iteration, a "continues humongous" region might be visited
3036       // before its associated "starts humongous".
3037       return false;
3038     }
3039 
3040     HeapWord* start = hr->bottom();
3041     HeapWord* limit = hr->next_top_at_mark_start();
3042     HeapWord* end = hr->end();
3043 
3044     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3045            err_msg("Preconditions not met - "
3046                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3047                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
3048                    start, limit, hr->top(), hr->end()));
3049 
3050     assert(hr->next_marked_bytes() == 0, "Precondition");
3051 
3052     if (start == limit) {
3053       // NTAMS of this region has not been set so nothing to do.
3054       return false;
3055     }
3056 
3057     // 'start' should be in the heap.
3058     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3060     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3061 
3062     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3063     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3064     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3065 
    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could actually be just beyond the end of the heap;
    // limit_idx will then correspond to a (non-existent) card
    // that is also outside the heap.
3073     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3074       limit_idx += 1;
3075     }
3076 
3077     assert(limit_idx <= end_idx, "or else use atomics");
3078 
3079     // Aggregate the "stripe" in the count data associated with hr.
3080     uint hrs_index = hr->hrs_index();
3081     size_t marked_bytes = 0;
3082 
3083     for (uint i = 0; i < _max_worker_id; i += 1) {
3084       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3085       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3086 
3087       // Fetch the marked_bytes in this region for task i and
3088       // add it to the running total for this region.
3089       marked_bytes += marked_bytes_array[hrs_index];
3090 
3091       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3092       // into the global card bitmap.
3093       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3094 
3095       while (scan_idx < limit_idx) {
3096         assert(task_card_bm->at(scan_idx) == true, "should be");
3097         _cm_card_bm->set_bit(scan_idx);
3098         assert(_cm_card_bm->at(scan_idx) == true, "should be");
3099 
3100         // BitMap::get_next_one_offset() can handle the case when
3101         // its left_offset parameter is greater than its right_offset
3102         // parameter. It does, however, have an early exit if
3103         // left_offset == right_offset. So let's limit the value
3104         // passed in for left offset here.
3105         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3106         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3107       }
3108     }
3109 
3110     // Update the marked bytes for this region.
3111     hr->add_to_marked_bytes(marked_bytes);
3112 
3113     // Next heap region
3114     return false;
3115   }
3116 };
3117 
3118 class G1AggregateCountDataTask: public AbstractGangTask {
3119 protected:
3120   G1CollectedHeap* _g1h;
3121   ConcurrentMark* _cm;
3122   BitMap* _cm_card_bm;
3123   uint _max_worker_id;
3124   int _active_workers;
3125 
3126 public:
3127   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3128                            ConcurrentMark* cm,
3129                            BitMap* cm_card_bm,
3130                            uint max_worker_id,
3131                            int n_workers) :
3132     AbstractGangTask("Count Aggregation"),
3133     _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3134     _max_worker_id(max_worker_id),
3135     _active_workers(n_workers) { }
3136 
3137   void work(uint worker_id) {
3138     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3139 
3140     if (G1CollectedHeap::use_parallel_gc_threads()) {
3141       _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
3142                                             _active_workers,
3143                                             HeapRegion::AggregateCountClaimValue);
3144     } else {
3145       _g1h->heap_region_iterate(&cl);
3146     }
3147   }
3148 };
3149 
3150 
3151 void ConcurrentMark::aggregate_count_data() {
3152   int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3153                         _g1h->workers()->active_workers() :
3154                         1);
3155 
3156   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3157                                            _max_worker_id, n_workers);
3158 
3159   if (G1CollectedHeap::use_parallel_gc_threads()) {
3160     assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3161            "sanity check");
3162     _g1h->set_par_threads(n_workers);
3163     _g1h->workers()->run_task(&g1_par_agg_task);
3164     _g1h->set_par_threads(0);
3165 
3166     assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
3167            "sanity check");
3168     _g1h->reset_heap_region_claim_values();
3169   } else {
3170     g1_par_agg_task.work(0);
3171   }
3172 }
3173 
3174 // Clear the per-worker arrays used to store the per-region counting data
3175 void ConcurrentMark::clear_all_count_data() {
3176   // Clear the global card bitmap - it will be filled during
3177   // liveness count aggregation (during remark) and the
3178   // final counting task.
3179   _card_bm.clear();
3180 
3181   // Clear the global region bitmap - it will be filled as part
3182   // of the final counting task.
3183   _region_bm.clear();
3184 
3185   uint max_regions = _g1h->max_regions();
3186   assert(_max_worker_id > 0, "uninitialized");
3187 
3188   for (uint i = 0; i < _max_worker_id; i += 1) {
3189     BitMap* task_card_bm = count_card_bitmap_for(i);
3190     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3191 
3192     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3193     assert(marked_bytes_array != NULL, "uninitialized");
3194 
3195     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3196     task_card_bm->clear();
3197   }
3198 }
3199 
3200 void ConcurrentMark::print_stats() {
3201   if (verbose_stats()) {
3202     gclog_or_tty->print_cr("---------------------------------------------------------------------");
3203     for (size_t i = 0; i < _active_tasks; ++i) {
3204       _tasks[i]->print_stats();
3205       gclog_or_tty->print_cr("---------------------------------------------------------------------");
3206     }
3207   }
3208 }
3209 
3210 // abandon current marking iteration due to a Full GC
3211 void ConcurrentMark::abort() {
3212   // Clear all marks to force marking thread to do nothing
3213   _nextMarkBitMap->clearAll();
3214   // Clear the liveness counting data
3215   clear_all_count_data();
3216   // Empty mark stack
3217   reset_marking_state();
3218   for (uint i = 0; i < _max_worker_id; ++i) {
3219     _tasks[i]->clear_region_fields();
3220   }
3221   _has_aborted = true;
3222 
3223   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3224   satb_mq_set.abandon_partial_marking();
3225   // This can be called either during or outside marking, we'll read
3226   // the expected_active value from the SATB queue set.
3227   satb_mq_set.set_active_all_threads(
3228                                  false, /* new active value */
3229                                  satb_mq_set.is_active() /* expected_active */);
3230 }
3231 
3232 static void print_ms_time_info(const char* prefix, const char* name,
3233                                NumberSeq& ns) {
3234   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3235                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3236   if (ns.num() > 0) {
3237     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3238                            prefix, ns.sd(), ns.maximum());
3239   }
3240 }
3241 
3242 void ConcurrentMark::print_summary_info() {
3243   gclog_or_tty->print_cr(" Concurrent marking:");
3244   print_ms_time_info("  ", "init marks", _init_times);
3245   print_ms_time_info("  ", "remarks", _remark_times);
3246   {
3247     print_ms_time_info("     ", "final marks", _remark_mark_times);
3248     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3249 
3250   }
3251   print_ms_time_info("  ", "cleanups", _cleanup_times);
3252   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
3253                          _total_counting_time,
3254                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3255                           (double)_cleanup_times.num()
3256                          : 0.0));
3257   if (G1ScrubRemSets) {
3258     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3259                            _total_rs_scrub_time,
3260                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3261                             (double)_cleanup_times.num()
3262                            : 0.0));
3263   }
3264   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3265                          (_init_times.sum() + _remark_times.sum() +
3266                           _cleanup_times.sum())/1000.0);
3267   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3268                 "(%8.2f s marking).",
3269                 cmThread()->vtime_accum(),
3270                 cmThread()->vtime_mark_accum());
3271 }
3272 
3273 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3274   if (use_parallel_marking_threads()) {
3275     _parallel_workers->print_worker_threads_on(st);
3276   }
3277 }
3278 
3279 void ConcurrentMark::print_on_error(outputStream* st) const {
3280   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3281       _prevMarkBitMap, _nextMarkBitMap);
3282   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3283   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3284 }
3285 
3286 // We take a break if someone is trying to stop the world.
3287 bool ConcurrentMark::do_yield_check(uint worker_id) {
3288   if (should_yield()) {
3289     if (worker_id == 0) {
3290       _g1h->g1_policy()->record_concurrent_pause();
3291     }
3292     cmThread()->yield();
3293     return true;
3294   } else {
3295     return false;
3296   }
3297 }
3298 
3299 bool ConcurrentMark::should_yield() {
3300   return cmThread()->should_yield();
3301 }
3302 
3303 bool ConcurrentMark::containing_card_is_marked(void* p) {
3304   size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
3305   return _card_bm.at(offset >> CardTableModRefBS::card_shift);
3306 }
3307 
3308 bool ConcurrentMark::containing_cards_are_marked(void* start,
3309                                                  void* last) {
3310   return containing_card_is_marked(start) &&
3311          containing_card_is_marked(last);
3312 }
3313 
3314 #ifndef PRODUCT
3315 // for debugging purposes
3316 void ConcurrentMark::print_finger() {
3317   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3318                          _heap_start, _heap_end, _finger);
3319   for (uint i = 0; i < _max_worker_id; ++i) {
3320     gclog_or_tty->print("   %u: "PTR_FORMAT, i, _tasks[i]->finger());
3321   }
3322   gclog_or_tty->print_cr("");
3323 }
3324 #endif
3325 
3326 void CMTask::scan_object(oop obj) {
3327   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3328 
3329   if (_cm->verbose_high()) {
3330     gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3331                            _worker_id, (void*) obj);
3332   }
3333 
3334   size_t obj_size = obj->size();
3335   _words_scanned += obj_size;
3336 
3337   obj->oop_iterate(_cm_oop_closure);
3338   statsOnly( ++_objs_scanned );
3339   check_limits();
3340 }
3341 
3342 // Closure for iteration over bitmaps
3343 class CMBitMapClosure : public BitMapClosure {
3344 private:
3345   // the bitmap that is being iterated over
3346   CMBitMap*                   _nextMarkBitMap;
3347   ConcurrentMark*             _cm;
3348   CMTask*                     _task;
3349 
3350 public:
3351   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3352     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
3353 
3354   bool do_bit(size_t offset) {
3355     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3356     assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert(addr < _cm->finger(), "invariant");
3358 
3359     statsOnly( _task->increase_objs_found_on_bitmap() );
3360     assert(addr >= _task->finger(), "invariant");
3361 
3362     // We move that task's local finger along.
3363     _task->move_finger_to(addr);
3364 
3365     _task->scan_object(oop(addr));
3366     // we only partially drain the local queue and global stack
3367     _task->drain_local_queue(true);
3368     _task->drain_global_stack(true);
3369 
3370     // if the has_aborted flag has been raised, we need to bail out of
3371     // the iteration
3372     return !_task->has_aborted();
3373   }
3374 };
3375 
3376 // Closure for iterating over objects, currently only used for
3377 // processing SATB buffers.
3378 class CMObjectClosure : public ObjectClosure {
3379 private:
3380   CMTask* _task;
3381 
3382 public:
3383   void do_object(oop obj) {
3384     _task->deal_with_reference(obj);
3385   }
3386 
3387   CMObjectClosure(CMTask* task) : _task(task) { }
3388 };
3389 
3390 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3391                                ConcurrentMark* cm,
3392                                CMTask* task)
3393   : _g1h(g1h), _cm(cm), _task(task) {
3394   assert(_ref_processor == NULL, "should be initialized to NULL");
3395 
3396   if (G1UseConcMarkReferenceProcessing) {
3397     _ref_processor = g1h->ref_processor_cm();
3398     assert(_ref_processor != NULL, "should not be NULL");
3399   }
3400 }
3401 
3402 void CMTask::setup_for_region(HeapRegion* hr) {
3403   // Separated the asserts so that we know which one fires.
3404   assert(hr != NULL,
3405         "claim_region() should have filtered out continues humongous regions");
3406   assert(!hr->continuesHumongous(),
3407         "claim_region() should have filtered out continues humongous regions");
3408 
3409   if (_cm->verbose_low()) {
3410     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3411                            _worker_id, hr);
3412   }
3413 
3414   _curr_region  = hr;
3415   _finger       = hr->bottom();
3416   update_region_limit();
3417 }
3418 
3419 void CMTask::update_region_limit() {
3420   HeapRegion* hr            = _curr_region;
3421   HeapWord* bottom          = hr->bottom();
3422   HeapWord* limit           = hr->next_top_at_mark_start();
3423 
3424   if (limit == bottom) {
3425     if (_cm->verbose_low()) {
3426       gclog_or_tty->print_cr("[%u] found an empty region "
3427                              "["PTR_FORMAT", "PTR_FORMAT")",
3428                              _worker_id, bottom, limit);
3429     }
3430     // The region was collected underneath our feet.
3431     // We set the finger to bottom to ensure that the bitmap
3432     // iteration that will follow this will not do anything.
3433     // (this is not a condition that holds when we set the region up,
3434     // as the region is not supposed to be empty in the first place)
3435     _finger = bottom;
3436   } else if (limit >= _region_limit) {
3437     assert(limit >= _finger, "peace of mind");
3438   } else {
3439     assert(limit < _region_limit, "only way to get here");
3440     // This can happen under some pretty unusual circumstances.  An
3441     // evacuation pause empties the region underneath our feet (NTAMS
3442     // at bottom). We then do some allocation in the region (NTAMS
3443     // stays at bottom), followed by the region being used as a GC
3444     // alloc region (NTAMS will move to top() and the objects
3445     // originally below it will be grayed). All objects now marked in
3446     // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
3448     // set _finger to be limit to ensure that the bitmap iteration
3449     // doesn't do anything.
3450     _finger = limit;
3451   }
3452 
3453   _region_limit = limit;
3454 }
3455 
3456 void CMTask::giveup_current_region() {
3457   assert(_curr_region != NULL, "invariant");
3458   if (_cm->verbose_low()) {
3459     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3460                            _worker_id, _curr_region);
3461   }
3462   clear_region_fields();
3463 }
3464 
3465 void CMTask::clear_region_fields() {
3466   // Values for these three fields that indicate that we're not
3467   // holding on to a region.
3468   _curr_region   = NULL;
3469   _finger        = NULL;
3470   _region_limit  = NULL;
3471 }
3472 
3473 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3474   if (cm_oop_closure == NULL) {
3475     assert(_cm_oop_closure != NULL, "invariant");
3476   } else {
3477     assert(_cm_oop_closure == NULL, "invariant");
3478   }
3479   _cm_oop_closure = cm_oop_closure;
3480 }
3481 
3482 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3483   guarantee(nextMarkBitMap != NULL, "invariant");
3484 
3485   if (_cm->verbose_low()) {
3486     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3487   }
3488 
3489   _nextMarkBitMap                = nextMarkBitMap;
3490   clear_region_fields();
3491 
3492   _calls                         = 0;
3493   _elapsed_time_ms               = 0.0;
3494   _termination_time_ms           = 0.0;
3495   _termination_start_time_ms     = 0.0;
3496 
3497 #if _MARKING_STATS_
3498   _local_pushes                  = 0;
3499   _local_pops                    = 0;
3500   _local_max_size                = 0;
3501   _objs_scanned                  = 0;
3502   _global_pushes                 = 0;
3503   _global_pops                   = 0;
3504   _global_max_size               = 0;
3505   _global_transfers_to           = 0;
3506   _global_transfers_from         = 0;
3507   _regions_claimed               = 0;
3508   _objs_found_on_bitmap          = 0;
3509   _satb_buffers_processed        = 0;
3510   _steal_attempts                = 0;
3511   _steals                        = 0;
3512   _aborted                       = 0;
3513   _aborted_overflow              = 0;
3514   _aborted_cm_aborted            = 0;
3515   _aborted_yield                 = 0;
3516   _aborted_timed_out             = 0;
3517   _aborted_satb                  = 0;
3518   _aborted_termination           = 0;
3519 #endif // _MARKING_STATS_
3520 }
3521 
3522 bool CMTask::should_exit_termination() {
3523   regular_clock_call();
3524   // This is called when we are in the termination protocol. We should
3525   // quit if, for some reason, this task wants to abort or the global
3526   // stack is not empty (this means that we can get work from it).
3527   return !_cm->mark_stack_empty() || has_aborted();
3528 }
3529 
3530 void CMTask::reached_limit() {
3531   assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
3533          "shouldn't have been called otherwise");
3534   regular_clock_call();
3535 }
3536 
3537 void CMTask::regular_clock_call() {
3538   if (has_aborted()) return;
3539 
3540   // First, we need to recalculate the words scanned and refs reached
3541   // limits for the next clock call.
3542   recalculate_limits();
3543 
3544   // During the regular clock call we do the following
3545 
3546   // (1) If an overflow has been flagged, then we abort.
3547   if (_cm->has_overflown()) {
3548     set_has_aborted();
3549     return;
3550   }
3551 
3552   // If we are not concurrent (i.e. we're doing remark) we don't need
3553   // to check anything else. The other steps are only needed during
3554   // the concurrent marking phase.
3555   if (!concurrent()) return;
3556 
3557   // (2) If marking has been aborted for Full GC, then we also abort.
3558   if (_cm->has_aborted()) {
3559     set_has_aborted();
3560     statsOnly( ++_aborted_cm_aborted );
3561     return;
3562   }
3563 
3564   double curr_time_ms = os::elapsedVTime() * 1000.0;
3565 
3566   // (3) If marking stats are enabled, then we update the step history.
3567 #if _MARKING_STATS_
3568   if (_words_scanned >= _words_scanned_limit) {
3569     ++_clock_due_to_scanning;
3570   }
3571   if (_refs_reached >= _refs_reached_limit) {
3572     ++_clock_due_to_marking;
3573   }
3574 
3575   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3576   _interval_start_time_ms = curr_time_ms;
3577   _all_clock_intervals_ms.add(last_interval_ms);
3578 
3579   if (_cm->verbose_medium()) {
3580       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3581                         "scanned = %d%s, refs reached = %d%s",
3582                         _worker_id, last_interval_ms,
3583                         _words_scanned,
3584                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3585                         _refs_reached,
3586                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3587   }
3588 #endif // _MARKING_STATS_
3589 
3590   // (4) We check whether we should yield. If we have to, then we abort.
3591   if (_cm->should_yield()) {
3592     // We should yield. To do this we abort the task. The caller is
3593     // responsible for yielding.
3594     set_has_aborted();
3595     statsOnly( ++_aborted_yield );
3596     return;
3597   }
3598 
3599   // (5) We check whether we've reached our time quota. If we have,
3600   // then we abort.
3601   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3602   if (elapsed_time_ms > _time_target_ms) {
3603     set_has_aborted();
3604     _has_timed_out = true;
3605     statsOnly( ++_aborted_timed_out );
3606     return;
3607   }
3608 
  // (6) Finally, we check whether there are enough completed SATB
3610   // buffers available for processing. If there are, we abort.
3611   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3612   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3613     if (_cm->verbose_low()) {
3614       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3615                              _worker_id);
3616     }
3617     // we do need to process SATB buffers, we'll abort and restart
3618     // the marking task to do so
3619     set_has_aborted();
3620     statsOnly( ++_aborted_satb );
3621     return;
3622   }
3623 }
3624 
3625 void CMTask::recalculate_limits() {
3626   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3627   _words_scanned_limit      = _real_words_scanned_limit;
3628 
3629   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
3630   _refs_reached_limit       = _real_refs_reached_limit;
3631 }
3632 
3633 void CMTask::decrease_limits() {
3634   // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per-byte scanned cost (i.e. move
3636   // entries to/from the global stack). It basically tries to decrease the
3637   // scanning limit so that the clock is called earlier.
3638 
3639   if (_cm->verbose_medium()) {
3640     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3641   }
3642 
3643   _words_scanned_limit = _real_words_scanned_limit -
3644     3 * words_scanned_period / 4;
3645   _refs_reached_limit  = _real_refs_reached_limit -
3646     3 * refs_reached_period / 4;
3647 }
3648 
3649 void CMTask::move_entries_to_global_stack() {
3650   // local array where we'll store the entries that will be popped
3651   // from the local queue
3652   oop buffer[global_stack_transfer_size];
3653 
3654   int n = 0;
3655   oop obj;
3656   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3657     buffer[n] = obj;
3658     ++n;
3659   }
3660 
3661   if (n > 0) {
3662     // we popped at least one entry from the local queue
3663 
3664     statsOnly( ++_global_transfers_to; _local_pops += n );
3665 
3666     if (!_cm->mark_stack_push(buffer, n)) {
3667       if (_cm->verbose_low()) {
3668         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3669                                _worker_id);
3670       }
3671       set_has_aborted();
3672     } else {
3673       // the transfer was successful
3674 
3675       if (_cm->verbose_medium()) {
3676         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3677                                _worker_id, n);
3678       }
3679       statsOnly( int tmp_size = _cm->mark_stack_size();
3680                  if (tmp_size > _global_max_size) {
3681                    _global_max_size = tmp_size;
3682                  }
3683                  _global_pushes += n );
3684     }
3685   }
3686 
3687   // this operation was quite expensive, so decrease the limits
3688   decrease_limits();
3689 }
3690 
3691 void CMTask::get_entries_from_global_stack() {
3692   // local array where we'll store the entries that will be popped
3693   // from the global stack.
3694   oop buffer[global_stack_transfer_size];
3695   int n;
3696   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3697   assert(n <= global_stack_transfer_size,
3698          "we should not pop more than the given limit");
3699   if (n > 0) {
3700     // yes, we did actually pop at least one entry
3701 
3702     statsOnly( ++_global_transfers_from; _global_pops += n );
3703     if (_cm->verbose_medium()) {
3704       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3705                              _worker_id, n);
3706     }
3707     for (int i = 0; i < n; ++i) {
3708       bool success = _task_queue->push(buffer[i]);
3709       // We only call this when the local queue is empty or under a
3710       // given target limit. So, we do not expect this push to fail.
3711       assert(success, "invariant");
3712     }
3713 
3714     statsOnly( int tmp_size = _task_queue->size();
3715                if (tmp_size > _local_max_size) {
3716                  _local_max_size = tmp_size;
3717                }
3718                _local_pushes += n );
3719   }
3720 
3721   // this operation was quite expensive, so decrease the limits
3722   decrease_limits();
3723 }
3724 
3725 void CMTask::drain_local_queue(bool partially) {
3726   if (has_aborted()) return;
3727 
  // Decide what the target size is, depending on whether we're going to
3729   // drain it partially (so that other tasks can steal if they run out
3730   // of things to do) or totally (at the very end).
3731   size_t target_size;
3732   if (partially) {
3733     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3734   } else {
3735     target_size = 0;
3736   }
3737 
3738   if (_task_queue->size() > target_size) {
3739     if (_cm->verbose_high()) {
3740       gclog_or_tty->print_cr("[%u] draining local queue, target size = %d",
3741                              _worker_id, target_size);
3742     }
3743 
3744     oop obj;
3745     bool ret = _task_queue->pop_local(obj);
3746     while (ret) {
3747       statsOnly( ++_local_pops );
3748 
3749       if (_cm->verbose_high()) {
3750         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3751                                (void*) obj);
3752       }
3753 
3754       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3755       assert(!_g1h->is_on_master_free_list(
3756                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3757 
3758       scan_object(obj);
3759 
3760       if (_task_queue->size() <= target_size || has_aborted()) {
3761         ret = false;
3762       } else {
3763         ret = _task_queue->pop_local(obj);
3764       }
3765     }
3766 
3767     if (_cm->verbose_high()) {
3768       gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3769                              _worker_id, _task_queue->size());
3770     }
3771   }
3772 }
3773 
3774 void CMTask::drain_global_stack(bool partially) {
3775   if (has_aborted()) return;
3776 
3777   // We have a policy to drain the local queue before we attempt to
3778   // drain the global stack.
3779   assert(partially || _task_queue->size() == 0, "invariant");
3780 
  // Decide what the target size is, depending on whether we're going to
3782   // drain it partially (so that other tasks can steal if they run out
3783   // of things to do) or totally (at the very end).  Notice that,
3784   // because we move entries from the global stack in chunks or
3785   // because another task might be doing the same, we might in fact
3786   // drop below the target. But, this is not a problem.
3787   size_t target_size;
3788   if (partially) {
3789     target_size = _cm->partial_mark_stack_size_target();
3790   } else {
3791     target_size = 0;
3792   }
3793 
3794   if (_cm->mark_stack_size() > target_size) {
3795     if (_cm->verbose_low()) {
3796       gclog_or_tty->print_cr("[%u] draining global_stack, target size %d",
3797                              _worker_id, target_size);
3798     }
3799 
3800     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3801       get_entries_from_global_stack();
3802       drain_local_queue(partially);
3803     }
3804 
3805     if (_cm->verbose_low()) {
3806       gclog_or_tty->print_cr("[%u] drained global stack, size = %d",
3807                              _worker_id, _cm->mark_stack_size());
3808     }
3809   }
3810 }
3811 
// The SATB queue set has several assumptions on whether to call the par
// or non-par versions of its methods. This is why some of the code is
3814 // replicated. We should really get rid of the single-threaded version
3815 // of the code to simplify things.
3816 void CMTask::drain_satb_buffers() {
3817   if (has_aborted()) return;
3818 
3819   // We set this so that the regular clock knows that we're in the
3820   // middle of draining buffers and doesn't set the abort flag when it
3821   // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
3823   _draining_satb_buffers = true;
3824 
3825   CMObjectClosure oc(this);
3826   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3827   if (G1CollectedHeap::use_parallel_gc_threads()) {
3828     satb_mq_set.set_par_closure(_worker_id, &oc);
3829   } else {
3830     satb_mq_set.set_closure(&oc);
3831   }
3832 
3833   // This keeps claiming and applying the closure to completed buffers
3834   // until we run out of buffers or we need to abort.
3835   if (G1CollectedHeap::use_parallel_gc_threads()) {
3836     while (!has_aborted() &&
3837            satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
3838       if (_cm->verbose_medium()) {
3839         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3840       }
3841       statsOnly( ++_satb_buffers_processed );
3842       regular_clock_call();
3843     }
3844   } else {
3845     while (!has_aborted() &&
3846            satb_mq_set.apply_closure_to_completed_buffer()) {
3847       if (_cm->verbose_medium()) {
3848         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3849       }
3850       statsOnly( ++_satb_buffers_processed );
3851       regular_clock_call();
3852     }
3853   }
3854 
3855   if (!concurrent() && !has_aborted()) {
3856     // We should only do this during remark.
3857     if (G1CollectedHeap::use_parallel_gc_threads()) {
3858       satb_mq_set.par_iterate_closure_all_threads(_worker_id);
3859     } else {
3860       satb_mq_set.iterate_closure_all_threads();
3861     }
3862   }
3863 
3864   _draining_satb_buffers = false;
3865 
3866   assert(has_aborted() ||
3867          concurrent() ||
3868          satb_mq_set.completed_buffers_num() == 0, "invariant");
3869 
3870   if (G1CollectedHeap::use_parallel_gc_threads()) {
3871     satb_mq_set.set_par_closure(_worker_id, NULL);
3872   } else {
3873     satb_mq_set.set_closure(NULL);
3874   }
3875 
3876   // again, this was a potentially expensive operation, decrease the
3877   // limits to get the regular clock call early
3878   decrease_limits();
3879 }
3880 
3881 void CMTask::print_stats() {
3882   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3883                          _worker_id, _calls);
3884   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3885                          _elapsed_time_ms, _termination_time_ms);
3886   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3887                          _step_times_ms.num(), _step_times_ms.avg(),
3888                          _step_times_ms.sd());
3889   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3890                          _step_times_ms.maximum(), _step_times_ms.sum());
3891 
3892 #if _MARKING_STATS_
3893   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3894                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3895                          _all_clock_intervals_ms.sd());
3896   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
3897                          _all_clock_intervals_ms.maximum(),
3898                          _all_clock_intervals_ms.sum());
3899   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = %d, marking = %d",
3900                          _clock_due_to_scanning, _clock_due_to_marking);
3901   gclog_or_tty->print_cr("  Objects: scanned = %d, found on the bitmap = %d",
3902                          _objs_scanned, _objs_found_on_bitmap);
3903   gclog_or_tty->print_cr("  Local Queue:  pushes = %d, pops = %d, max size = %d",
3904                          _local_pushes, _local_pops, _local_max_size);
3905   gclog_or_tty->print_cr("  Global Stack: pushes = %d, pops = %d, max size = %d",
3906                          _global_pushes, _global_pops, _global_max_size);
3907   gclog_or_tty->print_cr("                transfers to = %d, transfers from = %d",
3908                          _global_transfers_to,_global_transfers_from);
3909   gclog_or_tty->print_cr("  Regions: claimed = %d", _regions_claimed);
3910   gclog_or_tty->print_cr("  SATB buffers: processed = %d", _satb_buffers_processed);
3911   gclog_or_tty->print_cr("  Steals: attempts = %d, successes = %d",
3912                          _steal_attempts, _steals);
3913   gclog_or_tty->print_cr("  Aborted: %d, due to", _aborted);
3914   gclog_or_tty->print_cr("    overflow: %d, global abort: %d, yield: %d",
3915                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3916   gclog_or_tty->print_cr("    time out: %d, SATB: %d, termination: %d",
3917                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3918 #endif // _MARKING_STATS_
3919 }
3920 
3921 /*****************************************************************************
3922 
3923     The do_marking_step(time_target_ms, ...) method is the building
3924     block of the parallel marking framework. It can be called in parallel
3925     with other invocations of do_marking_step() on different tasks
3926     (but only one per task, obviously) and concurrently with the
3927     mutator threads, or during remark, hence it eliminates the need
3928     for two versions of the code. When called during remark, it will
3929     pick up from where the task left off during the concurrent marking
    phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
3932     it needs to yield.
3933 
3934     The data structures that it uses to do marking work are the
3935     following:
3936 
3937       (1) Marking Bitmap. If there are gray objects that appear only
3938       on the bitmap (this happens either when dealing with an overflow
3939       or when the initial marking phase has simply marked the roots
3940       and didn't push them on the stack), then tasks claim heap
3941       regions whose bitmap they then scan to find gray objects. A
3942       global finger indicates where the end of the last claimed region
3943       is. A local finger indicates how far into the region a task has
3944       scanned. The two fingers are used to determine how to gray an
3945       object (i.e. whether simply marking it is OK, as it will be
3946       visited by a task in the future, or whether it needs to be also
3947       pushed on a stack).
3948 
      (2) Local Queue. The local queue of the task, which is accessed
      reasonably efficiently by the task. Other tasks can steal from
      it when they run out of work. Throughout the marking phase, a
      task attempts to keep its local queue short but not totally
      empty, so that entries are available for stealing by other
      tasks. Only when there is no more work will a task totally
      drain its local queue.
3956 
      (3) Global Mark Stack. This handles local queue overflow. During
      marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex and more fine-grained
      interaction with it might cause contention. If it
      overflows, then the marking phase should restart and iterate
      over the bitmap to identify gray objects. Throughout the marking
      phase, tasks attempt to keep the global mark stack short but
      not totally empty, so that entries are available for
      popping by other tasks. Only when there is no more work will
      tasks totally drain the global mark stack.
3967 
3968       (4) SATB Buffer Queue. This is where completed SATB buffers are
3969       made available. Buffers are regularly removed from this queue
3970       and scanned for roots, so that the queue doesn't get too
3971       long. During remark, all completed buffers are processed, as
      well as the filled-in parts of any uncompleted buffers.
3973 
3974     The do_marking_step() method tries to abort when the time target
3975     has been reached. There are a few other cases when the
3976     do_marking_step() method also aborts:
3977 
3978       (1) When the marking phase has been aborted (after a Full GC).
3979 
3980       (2) When a global overflow (on the global stack) has been
3981       triggered. Before the task aborts, it will actually sync up with
3982       the other tasks to ensure that all the marking data structures
3983       (local queues, stacks, fingers etc.)  are re-initialized so that
3984       when do_marking_step() completes, the marking phase can
3985       immediately restart.
3986 
3987       (3) When enough completed SATB buffers are available. The
3988       do_marking_step() method only tries to drain SATB buffers right
3989       at the beginning. So, if enough buffers are available, the
3990       marking step aborts and the SATB buffers are processed at
3991       the beginning of the next invocation.
3992 
      (4) To yield. When we have to yield, we abort and yield
      right at the end of do_marking_step(). This saves us from a lot
      of hassle as, by yielding, we might allow a Full GC. If this
      happens then objects will be compacted underneath our feet, the
      heap might shrink, etc. We save checking for this by just
      aborting and doing the yield right at the end.
3999 
4000     From the above it follows that the do_marking_step() method should
4001     be called in a loop (or, otherwise, regularly) until it completes.
4002 
4003     If a marking step completes without its has_aborted() flag being
4004     true, it means it has completed the current marking phase (and
4005     also all other marking tasks have done so and have all synced up).
4006 
4007     A method called regular_clock_call() is invoked "regularly" (in
4008     sub ms intervals) throughout marking. It is this clock method that
4009     checks all the abort conditions which were mentioned above and
4010     decides when the task should abort. A work-based scheme is used to
4011     trigger this clock method: when the number of object words the
4012     marking phase has scanned or the number of references the marking
    phase has visited reaches a given limit. Additional invocations of
    the clock method have been planted in a few other strategic places
4015     too. The initial reason for the clock method was to avoid calling
4016     vtime too regularly, as it is quite expensive. So, once it was in
4017     place, it was natural to piggy-back all the other conditions on it
4018     too and not constantly check them throughout the code.
4019 
4020     If do_termination is true then do_marking_step will enter its
4021     termination protocol.
4022 
4023     The value of is_serial must be true when do_marking_step is being
4024     called serially (i.e. by the VMThread) and do_marking_step should
4025     skip any synchronization in the termination and overflow code.
4026     Examples include the serial remark code and the serial reference
4027     processing closures.
4028 
4029     The value of is_serial must be false when do_marking_step is
4030     being called by any of the worker threads in a work gang.
4031     Examples include the concurrent marking code (CMMarkingTask),
4032     the MT remark code, and the MT reference processing closures.
4033 
4034  *****************************************************************************/
4035 
4036 void CMTask::do_marking_step(double time_target_ms,
4037                              bool do_termination,
4038                              bool is_serial) {
4039   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4040   assert(concurrent() == _cm->concurrent(), "they should be the same");
4041 
4042   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4043   assert(_task_queues != NULL, "invariant");
4044   assert(_task_queue != NULL, "invariant");
4045   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4046 
4047   assert(!_claimed,
4048          "only one thread should claim this task at any one time");
4049 
  // OK, this doesn't safeguard against all possible scenarios, as it is
4051   // possible for two threads to set the _claimed flag at the same
4052   // time. But it is only for debugging purposes anyway and it will
4053   // catch most problems.
4054   _claimed = true;
4055 
4056   _start_time_ms = os::elapsedVTime() * 1000.0;
4057   statsOnly( _interval_start_time_ms = _start_time_ms );
4058 
4059   // If do_stealing is true then do_marking_step will attempt to
4060   // steal work from the other CMTasks. It only makes sense to
4061   // enable stealing when the termination protocol is enabled
4062   // and do_marking_step() is not being called serially.
4063   bool do_stealing = do_termination && !is_serial;
4064 
4065   double diff_prediction_ms =
4066     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4067   _time_target_ms = time_target_ms - diff_prediction_ms;
4068 
4069   // set up the variables that are used in the work-based scheme to
4070   // call the regular clock method
4071   _words_scanned = 0;
4072   _refs_reached  = 0;
4073   recalculate_limits();
4074 
4075   // clear all flags
4076   clear_has_aborted();
4077   _has_timed_out = false;
4078   _draining_satb_buffers = false;
4079 
4080   ++_calls;
4081 
4082   if (_cm->verbose_low()) {
4083     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4084                            "target = %1.2lfms >>>>>>>>>>",
4085                            _worker_id, _calls, _time_target_ms);
4086   }
4087 
4088   // Set up the bitmap and oop closures. Anything that uses them is
4089   // eventually called from this method, so it is OK to allocate these
  // on the stack.
4091   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4092   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
4093   set_cm_oop_closure(&cm_oop_closure);
4094 
4095   if (_cm->has_overflown()) {
4096     // This can happen if the mark stack overflows during a GC pause
4097     // and this task, after a yield point, restarts. We have to abort
4098     // as we need to get into the overflow protocol which happens
4099     // right at the end of this task.
4100     set_has_aborted();
4101   }
4102 
4103   // First drain any available SATB buffers. After this, we will not
4104   // look at SATB buffers before the next invocation of this method.
4105   // If enough completed SATB buffers are queued up, the regular clock
4106   // will abort this task so that it restarts.
4107   drain_satb_buffers();
4108   // ...then partially drain the local queue and the global stack
4109   drain_local_queue(true);
4110   drain_global_stack(true);

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet. Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] we're scanning part "
                               "["PTR_FORMAT", "PTR_FORMAT") "
                               "of region "HR_FORMAT,
                               _worker_id, _finger, _region_limit,
                               HR_FORMAT_PARAMS(_curr_region));
      }

      assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        regular_clock_call();
      } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
        if (_nextMarkBitMap->isMarked(mr.start())) {
          // The object is marked - apply the closure
          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
          bitmap_closure.do_bit(offset);
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        regular_clock_call();
      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region  == NULL, "invariant");
      assert(_finger       == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
      }
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        statsOnly( ++_regions_claimed );

        if (_cm->verbose_low()) {
          gclog_or_tty->print_cr("[%u] we successfully claimed "
                                 "region "PTR_FORMAT,
                                 _worker_id, claimed_region);
        }

        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());
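
  // A note on how we get here (summary only): the loop above exits
  // either because this task has aborted (the time target was reached,
  // the mark stack overflowed, or SATB buffers built up; see
  // regular_clock_call()) or because _curr_region is NULL with no
  // regions left to claim.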

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
    }

    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt to steal work from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
    }

    while (!has_aborted()) {
      oop obj;
      statsOnly( ++_steal_attempts );

      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        if (_cm->verbose_medium()) {
          gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
                                 _worker_id, (void*) obj);
        }

        statsOnly( ++_steals );

        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }
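
  // For context (a hedged sketch of behavior, not a guarantee):
  // try_stealing() delegates to the shared task queue set, which
  // probes the queues of randomly chosen victim workers a bounded
  // number of times before giving up. The loop above therefore ends
  // once every queue appears empty or this task is forced to abort.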

  // If we are about to wrap up and go into termination, check if we
  // should raise the overflow flag.
  if (do_termination && !has_aborted()) {
    if (_cm->force_overflow()->should_force()) {
      _cm->set_has_overflown();
      regular_clock_call();
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
    }

    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The CMTask class also extends the TerminatorTerminator class,
    // so its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
      }
    } else {
      // Apparently there's more work to do. Let's abort this task;
      // the caller will restart it and we can hopefully find more
      // things to do.

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] apparently there is more work to do",
                               _worker_id);
      }

      set_has_aborted();
      statsOnly( ++_aborted_termination );
    }
  }

  // Mainly for debugging purposes, to make sure that a pointer to the
  // closure which was allocated in this frame doesn't escape it by
  // accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.

    statsOnly( ++_aborted );

    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialise in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
      }

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialise our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      statsOnly( ++_aborted_overflow );

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're in the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
                             "elapsed = %1.2lfms <<<<<<<<<<",
                             _worker_id, _time_target_ms, elapsed_time_ms);
      if (_cm->has_aborted()) {
        gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
                               _worker_id);
      }
    }
  } else {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
                             "elapsed = %1.2lfms <<<<<<<<<<",
                             _worker_id, _time_target_ms, elapsed_time_ms);
    }
  }

  _claimed = false;
}
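
// For reference, a hedged sketch of how do_marking_step() is typically
// driven (arguments are illustrative of the callers elsewhere in this
// file, not normative):
//
//   // concurrent phase: bounded steps, parallel termination
//   task->do_marking_step(mark_step_duration_ms,
//                         true  /* do_termination */,
//                         false /* is_serial */);
//
//   // remark pause: an effectively unlimited time target, so the step
//   // runs until marking is genuinely complete
//   task->do_marking_step(1000000000.0 /* something very large */,
//                         true /* do_termination */,
//                         is_serial);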

CMTask::CMTask(uint worker_id,
               ConcurrentMark* cm,
               size_t* marked_bytes,
               BitMap* card_bm,
               CMTaskQueue* task_queue,
               CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL),
    _marked_bytes_array(marked_bytes),
    _card_bm(card_bm) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

  statsOnly( _clock_due_to_scanning = 0;
             _clock_due_to_marking  = 0 );

  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX            "###"

#define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT            "   %-4s"
#define G1PPRL_TYPE_H_FORMAT          "   %4s"
#define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT          "  %9s"
#define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT        "  %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
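
// As a hedged illustration of how these macros compose (exact widths
// and pointer formatting depend on the platform), an expression like
//   G1PPRL_SUM_MB_PERC_FORMAT("used")
// expands to the format string "  used: %1.2f MB / %1.2f %%" and,
// once printed, might produce a fragment such as:
//   used: 512.00 MB / 50.00 %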

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
  : _out(out),
    _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _hum_used_bytes(0), _hum_capacity_bytes(0),
    _hum_prev_live_bytes(0), _hum_next_live_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_committed = g1h->g1_committed();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  _out->cr();
  _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
                 G1PPRL_SUM_ADDR_FORMAT("committed")
                 G1PPRL_SUM_ADDR_FORMAT("reserved")
                 G1PPRL_SUM_BYTE_FORMAT("region-size"),
                 g1_committed.start(), g1_committed.end(),
                 g1_reserved.start(), g1_reserved.end(),
                 HeapRegion::GrainBytes);
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT,
                 "type", "address-range",
                 "used", "prev-live", "next-live", "gc-eff");
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT,
                 "", "",
                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
}
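
// A hedged sketch of the header this constructor emits (phase name,
// column widths and addresses are illustrative only):
//
//   ### PHASE Post-Marking @ 12.345
//   ### HEAP  committed: 0x00000000f0000000-0x0000000100000000 ...
//   ###
//   ###   type        address-range       used  prev-live  next-live   gc-eff
//   ###                                 (bytes)    (bytes)    (bytes)  (bytes/ms)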

// Takes a pointer to one of the _hum_* fields, deduces the
// corresponding value for a region in a humongous region series
// (either the region size, or whatever is left of the _hum_* field if
// it is smaller than the region size), and updates the _hum_* field
// accordingly.
size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
  size_t bytes = 0;
  // The > 0 check is to deal with the prev and next live bytes which
  // could be 0.
  if (*hum_bytes > 0) {
    bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
    *hum_bytes -= bytes;
  }
  return bytes;
}
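
// A worked example (illustrative numbers): with a 1 MB region size and
// a humongous object whose _hum_used_bytes starts at 2.5 MB, three
// successive calls return 1 MB, 1 MB and 0.5 MB, leaving the field at
// zero; a further call (e.g. for a live-bytes field that started at 0)
// simply returns 0.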

// It deduces the values for a region in a humongous region series
// from the _hum_* fields and updates those accordingly. It assumes
// that the _hum_* fields have already been set up from the "starts
// humongous" region and that we visit the regions in address order.
void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
                                                     size_t* capacity_bytes,
                                                     size_t* prev_live_bytes,
                                                     size_t* next_live_bytes) {
  assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
  *used_bytes      = get_hum_bytes(&_hum_used_bytes);
  *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
  *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
  *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type = "";
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  if (r->used() == 0) {
    type = "FREE";
  } else if (r->is_survivor()) {
    type = "SURV";
  } else if (r->is_young()) {
    type = "EDEN";
  } else if (r->startsHumongous()) {
    type = "HUMS";

    assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
           _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
           "they should have been zeroed after the last time we used them");
    // Set up the _hum_* fields.
    _hum_capacity_bytes  = capacity_bytes;
    _hum_used_bytes      = used_bytes;
    _hum_prev_live_bytes = prev_live_bytes;
    _hum_next_live_bytes = next_live_bytes;
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    end = bottom + HeapRegion::GrainWords;
  } else if (r->continuesHumongous()) {
    type = "HUMC";
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    assert(end == bottom + HeapRegion::GrainWords, "invariant");
  } else {
    type = "OLD";
  }

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;

  // Print a line for this particular region.
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_FORMAT
                 G1PPRL_ADDR_BASE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_DOUBLE_FORMAT,
                 type, bottom, end,
                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff);

  return false;
}
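
// A hedged example of one per-region line as formatted above
// (addresses, byte counts and spacing are illustrative only):
//
//   ###   EDEN 0x00000000f0000000-0x00000000f0100000    1048576     524288     524288           0.0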

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // Print the footer of the output.
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 " SUMMARY"
                 G1PPRL_SUM_MB_FORMAT("capacity")
                 G1PPRL_SUM_MB_PERC_FORMAT("used")
                 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                 G1PPRL_SUM_MB_PERC_FORMAT("next-live"),
                 bytes_to_mb(_total_capacity_bytes),
                 bytes_to_mb(_total_used_bytes),
                 perc(_total_used_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_prev_live_bytes),
                 perc(_total_prev_live_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_next_live_bytes),
                 perc(_total_next_live_bytes, _total_capacity_bytes));
  _out->cr();
}
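
// A hedged usage sketch: the closure prints its header on construction,
// one line per region as it is applied, and the summary footer on
// destruction, so a typical caller (as elsewhere in this file) looks
// roughly like:
//
//   G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
//   g1h->heap_region_iterate(&cl);
//   // summary printed when cl goes out of scope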