/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
                                               HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                 HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}
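
// A note on the address <-> bit-offset mapping used above. One bitmap
// bit covers 2^_shifter heap words, so (per the inline conversion
// utilities in the header):
//   heapWordToOffset(addr) == (addr - _bmStartWord) >> _shifter
//   offsetToHeapWord(off)  == _bmStartWord + (off << _shifter)
// For example, with _shifter == 0 every heap word has its own bit,
// while _shifter == 3 maps one bit to each 8-word stretch of the heap.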

#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((uintptr_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}
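
// A note on the sizing arithmetic in allocate(): the bitmap needs one
// bit per 2^_shifter heap words, i.e. (_bmWordSize >> _shifter) bits,
// which is (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes; the
// "+ 1" rounds the byte count up so that a partial trailing byte is
// still covered by the reservation.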

void CMBitMap::clearAll() {
  _bm.clear();
}

void CMBitMap::markRange(MemRegion mr) {
  // Note: intersection() returns the clipped region; it does not
  // modify mr in place, so the result must be assigned back.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we overflowed the marking stack while marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up the existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with the old stack
    _virtual_space.release();
    // Reinitialize the virtual space for the new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double the capacity; continue with the current stack.
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically.  We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}
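
// par_push and par_adjoin_arr (below) follow the lock-free
// claim-then-fill pattern: the winner of the cmpxchg on _index owns
// the claimed slot(s) and only then stores the entries into them.
// _max_depth is a debug-only statistic and is deliberately updated
// without atomics; a lost update merely makes the recorded maximum
// slightly pessimistic.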

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}
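
// A minimal usage sketch for the two array variants above, assuming a
// hypothetical batch size and pointer name (the actual callers are the
// CMTask methods that move batches of entries between a task's local
// queue and this global stack):
//
//   oop buffer[64];   // 64 is an illustrative, hypothetical batch size
//   int n;
//   // global_stack here stands for the ConcurrentMark's _markStack
//   if (global_stack->par_pop_arr(buffer, 64, &n)) {
//     // process buffer[0..n-1], e.g. push onto a task-local queue
//   }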

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

bool ConcurrentMark::not_yet_marked(oop obj) const {
  return _g1h->is_obj_ill(obj);
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock, so there is nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}
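
// claim_next() uses a double-checked claim: _next_survivor is first
// read lock-free (so the common "nothing left to claim" case takes no
// lock) and is then re-read and advanced under RootRegionScan_lock,
// ensuring that each survivor region is handed to exactly one scanning
// thread.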

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}
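
// scale_parallel_threads() yields roughly one marking thread for every
// four parallel GC threads, rounding to the nearest and never going
// below one: e.g. 1..4 parallel threads -> 1, 8 -> 2, 13 -> 3.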

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  _g1h(g1h),
  _markBitMap1(log2_intptr(MinObjAlignment)),
  _markBitMap2(log2_intptr(MinObjAlignment)),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = "PTR_FORMAT, _heap_start, _heap_end);
  }

  if (!_markBitMap1.allocate(heap_rs)) {
    warning("Failed to allocate first CM bit map");
    return;
  }
  if (!_markBitMap2.allocate(heap_rs)) {
    warning("Failed to allocate second CM bit map");
    return;
  }

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
      vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads =       0;
    _max_parallel_marking_threads =   0;
    _sleep_factor             =     0.0;
    _marking_task_overhead    =     1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
                                                (double) os::processor_count();
      double sleep_factor =
                         (1.0 - marking_task_overhead) / marking_task_overhead;
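
      // Derivation of sleep_factor: each marking thread should be
      // running for a fraction marking_task_overhead of wall time. If
      // a thread works for W seconds and then sleeps for
      // W * sleep_factor seconds, its duty cycle is
      // W / (W + W * sleep_factor) = 1 / (1 + sleep_factor); solving
      // 1 / (1 + sleep_factor) == marking_task_overhead gives the
      // formula above.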

      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor             = sleep_factor;
      _marking_task_overhead    = marking_task_overhead;
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
                     (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) heap_rs.base();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.

    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions
  // at the end of evacuation pauses, when tasks are inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end   = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different numbers of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(_finger == _heap_end,
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   _finger, _heap_end));
    update_g1_committed(true);
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // clear the mark bitmap (no grey objects to start with).
  // We need to do this in chunks and offer to yield in between
  // each chunk.
  HeapWord* start  = _nextMarkBitMap->startWord();
  HeapWord* end    = _nextMarkBitMap->endWord();
  HeapWord* cur    = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur,next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Clear the liveness counting data
  clear_all_count_data();

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the number of remaining forced overflows decreases
  // at every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other threads to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended for a Full GC or an evacuation
 * pause while it waits. This is actually safe, since entering the
 * sync barrier is one of the last things do_marking_step() does, and
 * it doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }
  _first_overflow_barrier_sync.enter();
  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->date_stamp(PrintGCDateStamps);
        gclog_or_tty->stamp(PrintGCTimeStamps);
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }
  _second_overflow_barrier_sync.enter();
  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
          gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                                 "overhead %1.4lf",
                                 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                                 the_task->conc_overhead(os::elapsedTime()) * 8.0);
          gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                                 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_strong_roots()
    // and the decisions on that MT processing is made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops.  We overflowed.  Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}
1357 // Base class of the closures that finalize and verify the
1358 // liveness counting data.
1359 class CMCountDataClosureBase: public HeapRegionClosure {
1360 protected:
1361   G1CollectedHeap* _g1h;
1362   ConcurrentMark* _cm;
1363   CardTableModRefBS* _ct_bs;
1364 
1365   BitMap* _region_bm;
1366   BitMap* _card_bm;
1367 
1368   // Takes a region that's not empty (i.e., it has at least one
1369   // live object in it and sets its corresponding bit on the region
1370   // bitmap to 1. If the region is "starts humongous" it will also set
1371   // to 1 the bits on the region bitmap that correspond to its
1372   // associated "continues humongous" regions.
1373   void set_bit_for_region(HeapRegion* hr) {
1374     assert(!hr->continuesHumongous(), "should have filtered those out");
1375 
1376     BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1377     if (!hr->startsHumongous()) {
1378       // Normal (non-humongous) case: just set the bit.
1379       _region_bm->par_at_put(index, true);
1380     } else {
1381       // Starts humongous case: calculate how many regions are part of
1382       // this humongous region and then set the bit range.
1383       BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
1384       _region_bm->par_at_put_range(index, end_index, true);
1385     }
1386   }
1387 
1388 public:
1389   CMCountDataClosureBase(G1CollectedHeap* g1h,
1390                          BitMap* region_bm, BitMap* card_bm):
1391     _g1h(g1h), _cm(g1h->concurrent_mark()),
1392     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
1393     _region_bm(region_bm), _card_bm(card_bm) { }
1394 };
1395 
1396 // Closure that calculates the # live objects per region. Used
1397 // for verification purposes during the cleanup pause.
1398 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1399   CMBitMapRO* _bm;
1400   size_t _region_marked_bytes;
1401 
1402 public:
1403   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1404                          BitMap* region_bm, BitMap* card_bm) :
1405     CMCountDataClosureBase(g1h, region_bm, card_bm),
1406     _bm(bm), _region_marked_bytes(0) { }
1407 
1408   bool doHeapRegion(HeapRegion* hr) {
1409 
1410     if (hr->continuesHumongous()) {
1411       // We will ignore these here and process them when their
1412       // associated "starts humongous" region is processed (see
1413       // set_bit_for_heap_region()). Note that we cannot rely on their
1414       // associated "starts humongous" region to have their bit set to
1415       // 1 since, due to the region chunking in the parallel region
1416       // iteration, a "continues humongous" region might be visited
1417       // before its associated "starts humongous".
1418       return false;
1419     }
1420 
1421     HeapWord* ntams = hr->next_top_at_mark_start();
1422     HeapWord* start = hr->bottom();
1423 
1424     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1425            err_msg("Preconditions not met - "
1426                    "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1427                    start, ntams, hr->end()));
1428 
1429     // Find the first marked object at or after "start".
1430     start = _bm->getNextMarkedWordAddress(start, ntams);
1431 
1432     size_t marked_bytes = 0;
1433 
1434     while (start < ntams) {
1435       oop obj = oop(start);
1436       int obj_sz = obj->size();
1437       HeapWord* obj_end = start + obj_sz;
1438 
1439       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1440       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1441 
      // Note: if we're looking at the last region in the heap, obj_end
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
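      // For example (illustrative, assuming the default 512-byte cards):
      // an object spanning heap offsets [0x1000, 0x1300) covers cards 8
      // and 9; card_bitmap_index_for() yields start_idx = 8 and
      // end_idx = 9, and since 0x1300 is not card aligned end_idx is
      // bumped to 10 so that the exclusive range [8, 10) covers both
      // cards.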
1446       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // The end of the object is not card aligned - increment end_idx
        // to cover all the cards spanned by the object.
1449         end_idx += 1;
1450       }
1451 
1452       // Set the bits in the card BM for the cards spanned by this object.
1453       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1454 
1455       // Add the size of this object to the number of marked bytes.
1456       marked_bytes += (size_t)obj_sz * HeapWordSize;
1457 
1458       // Find the next marked object after this one.
1459       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1460     }
1461 
1462     // Mark the allocated-since-marking portion...
1463     HeapWord* top = hr->top();
1464     if (ntams < top) {
1465       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1466       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1467 
      // Note: if we're looking at the last region in the heap, top
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
1472       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // top is not card aligned - increment end_idx to cover all the
        // cards spanned by the range [ntams, top).
1475         end_idx += 1;
1476       }
1477       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1478 
1479       // This definitely means the region has live objects.
1480       set_bit_for_region(hr);
1481     }
1482 
1483     // Update the live region bitmap.
1484     if (marked_bytes > 0) {
1485       set_bit_for_region(hr);
1486     }
1487 
1488     // Set the marked bytes for the current region so that
1489     // it can be queried by a calling verification routine
1490     _region_marked_bytes = marked_bytes;
1491 
1492     return false;
1493   }
1494 
1495   size_t region_marked_bytes() const { return _region_marked_bytes; }
1496 };
1497 
1498 // Heap region closure used for verifying the counting data
1499 // that was accumulated concurrently and aggregated during
1500 // the remark pause. This closure is applied to the heap
1501 // regions during the STW cleanup pause.
1502 
1503 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1504   G1CollectedHeap* _g1h;
1505   ConcurrentMark* _cm;
1506   CalcLiveObjectsClosure _calc_cl;
1507   BitMap* _region_bm;   // Region BM to be verified
1508   BitMap* _card_bm;     // Card BM to be verified
1509   bool _verbose;        // verbose output?
1510 
1511   BitMap* _exp_region_bm; // Expected Region BM values
1512   BitMap* _exp_card_bm;   // Expected card BM values
1513 
1514   int _failures;
1515 
1516 public:
1517   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1518                                 BitMap* region_bm,
1519                                 BitMap* card_bm,
1520                                 BitMap* exp_region_bm,
1521                                 BitMap* exp_card_bm,
1522                                 bool verbose) :
1523     _g1h(g1h), _cm(g1h->concurrent_mark()),
1524     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1525     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1526     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1527     _failures(0) { }
1528 
1529   int failures() const { return _failures; }
1530 
1531   bool doHeapRegion(HeapRegion* hr) {
1532     if (hr->continuesHumongous()) {
1533       // We will ignore these here and process them when their
1534       // associated "starts humongous" region is processed (see
      // set_bit_for_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have its bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous" region.
1540       return false;
1541     }
1542 
1543     int failures = 0;
1544 
1545     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1546     // this region and set the corresponding bits in the expected region
1547     // and card bitmaps.
1548     bool res = _calc_cl.doHeapRegion(hr);
1549     assert(res == false, "should be continuing");
1550 
1551     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1552                     Mutex::_no_safepoint_check_flag);
1553 
1554     // Verify the marked bytes for this region.
1555     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1556     size_t act_marked_bytes = hr->next_marked_bytes();
1557 
    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting for some objects during the actual marking.
1560     if (exp_marked_bytes > act_marked_bytes) {
1561       if (_verbose) {
1562         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1563                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1564                                hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
1565       }
1566       failures += 1;
1567     }
1568 
1569     // Verify the bit, for this region, in the actual and expected
1570     // (which was just calculated) region bit maps.
1571     // We're not OK if the bit in the calculated expected region
1572     // bitmap is set and the bit in the actual region bitmap is not.
1573     BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1574 
1575     bool expected = _exp_region_bm->at(index);
1576     bool actual = _region_bm->at(index);
1577     if (expected && !actual) {
1578       if (_verbose) {
1579         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1580                                "expected: %s, actual: %s",
1581                                hr->hrs_index(),
1582                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1583       }
1584       failures += 1;
1585     }
1586 
1587     // Verify that the card bit maps for the cards spanned by the current
1588     // region match. We have an error if we have a set bit in the expected
1589     // bit map and the corresponding bit in the actual bitmap is not set.
1590 
1591     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1592     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1593 
    for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
1595       expected = _exp_card_bm->at(i);
1596       actual = _card_bm->at(i);
1597 
1598       if (expected && !actual) {
1599         if (_verbose) {
1600           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1601                                  "expected: %s, actual: %s",
1602                                  hr->hrs_index(), i,
1603                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1604         }
1605         failures += 1;
1606       }
1607     }
1608 
1609     if (failures > 0 && _verbose)  {
1610       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1611                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1612                              HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
1613                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1614     }
1615 
1616     _failures += failures;
1617 
1618     // We could stop iteration over the heap when we
1619     // find the first violating region by returning true.
1620     return false;
1621   }
1622 };
1623 
1624 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1625 protected:
1626   G1CollectedHeap* _g1h;
1627   ConcurrentMark* _cm;
1628   BitMap* _actual_region_bm;
1629   BitMap* _actual_card_bm;
1630 
1631   uint    _n_workers;
1632 
1633   BitMap* _expected_region_bm;
1634   BitMap* _expected_card_bm;
1635 
1636   int  _failures;
1637   bool _verbose;
1638 
1639 public:
1640   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1641                             BitMap* region_bm, BitMap* card_bm,
1642                             BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _n_workers(0),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0), _verbose(false) {
1649     assert(VerifyDuringGC, "don't call this otherwise");
1650 
1651     // Use the value already set as the number of active threads
1652     // in the call to run_task().
1653     if (G1CollectedHeap::use_parallel_gc_threads()) {
      assert(_g1h->workers()->active_workers() > 0,
             "Should have been previously set");
1656       _n_workers = _g1h->workers()->active_workers();
1657     } else {
1658       _n_workers = 1;
1659     }
1660 
1661     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1662     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1663 
1664     _verbose = _cm->verbose_medium();
1665   }
1666 
1667   void work(uint worker_id) {
1668     assert(worker_id < _n_workers, "invariant");
1669 
1670     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1671                                             _actual_region_bm, _actual_card_bm,
1672                                             _expected_region_bm,
1673                                             _expected_card_bm,
1674                                             _verbose);
1675 
1676     if (G1CollectedHeap::use_parallel_gc_threads()) {
1677       _g1h->heap_region_par_iterate_chunked(&verify_cl,
1678                                             worker_id,
1679                                             _n_workers,
1680                                             HeapRegion::VerifyCountClaimValue);
1681     } else {
1682       _g1h->heap_region_iterate(&verify_cl);
1683     }
1684 
1685     Atomic::add(verify_cl.failures(), &_failures);
1686   }
1687 
1688   int failures() const { return _failures; }
1689 };
1690 
// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top)
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets, in the region liveness
// bitmap, the bit for each region containing live data.
1697 
1698 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1699  public:
1700   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1701                               BitMap* region_bm,
1702                               BitMap* card_bm) :
1703     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1704 
1705   bool doHeapRegion(HeapRegion* hr) {
1706 
1707     if (hr->continuesHumongous()) {
1708       // We will ignore these here and process them when their
1709       // associated "starts humongous" region is processed (see
      // set_bit_for_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have its bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous" region.
1715       return false;
1716     }
1717 
1718     HeapWord* ntams = hr->next_top_at_mark_start();
1719     HeapWord* top   = hr->top();
1720 
1721     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1722 
1723     // Mark the allocated-since-marking portion...
1724     if (ntams < top) {
1725       // This definitely means the region has live objects.
1726       set_bit_for_region(hr);
1727 
1728       // Now set the bits in the card bitmap for [ntams, top)
1729       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1730       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1731 
      // Note: if we're looking at the last region in the heap, top
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
1736       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // top is not card aligned - increment end_idx to cover all the
        // cards spanned by the range [ntams, top).
1739         end_idx += 1;
1740       }
1741 
1742       assert(end_idx <= _card_bm->size(),
1743              err_msg("oob: end_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1744                      end_idx, _card_bm->size()));
1745       assert(start_idx < _card_bm->size(),
1746              err_msg("oob: start_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1747                      start_idx, _card_bm->size()));
1748 
1749       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1750     }
1751 
1752     // Set the bit for the region if it contains live data
1753     if (hr->next_marked_bytes() > 0) {
1754       set_bit_for_region(hr);
1755     }
1756 
1757     return false;
1758   }
1759 };
1760 
1761 class G1ParFinalCountTask: public AbstractGangTask {
1762 protected:
1763   G1CollectedHeap* _g1h;
1764   ConcurrentMark* _cm;
1765   BitMap* _actual_region_bm;
1766   BitMap* _actual_card_bm;
1767 
1768   uint    _n_workers;
1769 
1770 public:
1771   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1772     : AbstractGangTask("G1 final counting"),
1773       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1774       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1775       _n_workers(0) {
1776     // Use the value already set as the number of active threads
1777     // in the call to run_task().
1778     if (G1CollectedHeap::use_parallel_gc_threads()) {
      assert(_g1h->workers()->active_workers() > 0,
             "Should have been previously set");
1781       _n_workers = _g1h->workers()->active_workers();
1782     } else {
1783       _n_workers = 1;
1784     }
1785   }
1786 
1787   void work(uint worker_id) {
1788     assert(worker_id < _n_workers, "invariant");
1789 
1790     FinalCountDataUpdateClosure final_update_cl(_g1h,
1791                                                 _actual_region_bm,
1792                                                 _actual_card_bm);
1793 
1794     if (G1CollectedHeap::use_parallel_gc_threads()) {
1795       _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1796                                             worker_id,
1797                                             _n_workers,
1798                                             HeapRegion::FinalCountClaimValue);
1799     } else {
1800       _g1h->heap_region_iterate(&final_update_cl);
1801     }
1802   }
1803 };
1804 
1805 class G1ParNoteEndTask;
1806 
1807 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1808   G1CollectedHeap* _g1;
1809   size_t _max_live_bytes;
1810   uint _regions_claimed;
1811   size_t _freed_bytes;
1812   FreeRegionList* _local_cleanup_list;
1813   HeapRegionSetCount _old_regions_removed;
1814   HeapRegionSetCount _humongous_regions_removed;
1815   HRRSCleanupTask* _hrrs_cleanup_task;
1816   double _claimed_region_time;
1817   double _max_region_time;
1818 
1819 public:
1820   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1821                              FreeRegionList* local_cleanup_list,
1822                              HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _max_live_bytes(0), _regions_claimed(0),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(),
    _humongous_regions_removed(),
    _hrrs_cleanup_task(hrrs_cleanup_task),
    _claimed_region_time(0.0), _max_region_time(0.0) { }
1831 
1832   size_t freed_bytes() { return _freed_bytes; }
1833   const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1834   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1835 
1836   bool doHeapRegion(HeapRegion *hr) {
1837     if (hr->continuesHumongous()) {
1838       return false;
1839     }
1840     // We use a claim value of zero here because all regions
1841     // were claimed with value 1 in the FinalCount task.
1842     _g1->reset_gc_time_stamps(hr);
1843     double start = os::elapsedTime();
1844     _regions_claimed++;
1845     hr->note_end_of_marking();
1846     _max_live_bytes += hr->max_live_bytes();
1847 
1848     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1849       _freed_bytes += hr->used();
1850       hr->set_containing_set(NULL);
1851       if (hr->isHumongous()) {
1852         assert(hr->startsHumongous(), "we should only see starts humongous");
1853         _humongous_regions_removed.increment(1u, hr->capacity());
1854         _g1->free_humongous_region(hr, _local_cleanup_list, true);
1855       } else {
1856         _old_regions_removed.increment(1u, hr->capacity());
1857         _g1->free_region(hr, _local_cleanup_list, true);
1858       }
1859     } else {
1860       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1861     }
1862 
1863     double region_time = (os::elapsedTime() - start);
1864     _claimed_region_time += region_time;
1865     if (region_time > _max_region_time) {
1866       _max_region_time = region_time;
1867     }
1868     return false;
1869   }
1870 
1871   size_t max_live_bytes() { return _max_live_bytes; }
1872   uint regions_claimed() { return _regions_claimed; }
1873   double claimed_region_time_sec() { return _claimed_region_time; }
1874   double max_region_time_sec() { return _max_region_time; }
1875 };
1876 
1877 class G1ParNoteEndTask: public AbstractGangTask {
1878   friend class G1NoteEndOfConcMarkClosure;
1879 
1880 protected:
1881   G1CollectedHeap* _g1h;
1882   size_t _max_live_bytes;
1883   size_t _freed_bytes;
1884   FreeRegionList* _cleanup_list;
1885 
1886 public:
1887   G1ParNoteEndTask(G1CollectedHeap* g1h,
1888                    FreeRegionList* cleanup_list) :
1889     AbstractGangTask("G1 note end"), _g1h(g1h),
1890     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1891 
1892   void work(uint worker_id) {
1893     double start = os::elapsedTime();
1894     FreeRegionList local_cleanup_list("Local Cleanup List");
1895     HRRSCleanupTask hrrs_cleanup_task;
1896     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1897                                            &hrrs_cleanup_task);
1898     if (G1CollectedHeap::use_parallel_gc_threads()) {
1899       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1900                                             _g1h->workers()->active_workers(),
1901                                             HeapRegion::NoteEndClaimValue);
1902     } else {
1903       _g1h->heap_region_iterate(&g1_note_end);
1904     }
1905     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1906 
1907     // Now update the lists
1908     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1909     {
1910       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1911       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1912       _max_live_bytes += g1_note_end.max_live_bytes();
1913       _freed_bytes += g1_note_end.freed_bytes();
1914 
1915       // If we iterate over the global cleanup list at the end of
1916       // cleanup to do this printing we will not guarantee to only
1917       // generate output for the newly-reclaimed regions (the list
1918       // might not be empty at the beginning of cleanup; we might
1919       // still be working on its previous contents). So we do the
1920       // printing here, before we append the new regions to the global
1921       // cleanup list.
1922 
1923       G1HRPrinter* hr_printer = _g1h->hr_printer();
1924       if (hr_printer->is_active()) {
1925         FreeRegionListIterator iter(&local_cleanup_list);
1926         while (iter.more_available()) {
1927           HeapRegion* hr = iter.get_next();
1928           hr_printer->cleanup(hr);
1929         }
1930       }
1931 
1932       _cleanup_list->add_ordered(&local_cleanup_list);
1933       assert(local_cleanup_list.is_empty(), "post-condition");
1934 
1935       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1936     }
1937   }
1938   size_t max_live_bytes() { return _max_live_bytes; }
1939   size_t freed_bytes() { return _freed_bytes; }
1940 };
1941 
1942 class G1ParScrubRemSetTask: public AbstractGangTask {
1943 protected:
1944   G1RemSet* _g1rs;
1945   BitMap* _region_bm;
1946   BitMap* _card_bm;
1947 public:
1948   G1ParScrubRemSetTask(G1CollectedHeap* g1h,
1949                        BitMap* region_bm, BitMap* card_bm) :
1950     AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
1951     _region_bm(region_bm), _card_bm(card_bm) { }
1952 
1953   void work(uint worker_id) {
1954     if (G1CollectedHeap::use_parallel_gc_threads()) {
1955       _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
1956                        HeapRegion::ScrubRemSetClaimValue);
1957     } else {
1958       _g1rs->scrub(_region_bm, _card_bm);
1959     }
1960   }
1961 
1962 };
1963 
1964 void ConcurrentMark::cleanup() {
1965   // world is stopped at this checkpoint
1966   assert(SafepointSynchronize::is_at_safepoint(),
1967          "world should be stopped");
1968   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1969 
1970   // If a full collection has happened, we shouldn't do this.
1971   if (has_aborted()) {
1972     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1973     return;
1974   }
1975 
1976   g1h->verify_region_sets_optional();
1977 
1978   if (VerifyDuringGC) {
1979     HandleMark hm;  // handle scope
1980     Universe::heap()->prepare_for_verify();
1981     Universe::verify(VerifyOption_G1UsePrevMarking,
1982                      " VerifyDuringGC:(before)");
1983   }
1984   g1h->check_bitmaps("Cleanup Start");
1985 
1986   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1987   g1p->record_concurrent_mark_cleanup_start();
1988 
1989   double start = os::elapsedTime();
1990 
1991   HeapRegionRemSet::reset_for_cleanup_tasks();
1992 
1993   uint n_workers;
1994 
1995   // Do counting once more with the world stopped for good measure.
1996   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1997 
1998   if (G1CollectedHeap::use_parallel_gc_threads()) {
    assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
2001 
2002     g1h->set_par_threads();
2003     n_workers = g1h->n_par_threads();
2004     assert(g1h->n_par_threads() == n_workers,
2005            "Should not have been reset");
2006     g1h->workers()->run_task(&g1_par_count_task);
2007     // Done with the parallel phase so reset to 0.
2008     g1h->set_par_threads(0);
2009 
2010     assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
2011            "sanity check");
2012   } else {
2013     n_workers = 1;
2014     g1_par_count_task.work(0);
2015   }
2016 
2017   if (VerifyDuringGC) {
2018     // Verify that the counting data accumulated during marking matches
2019     // that calculated by walking the marking bitmap.
2020 
2021     // Bitmaps to hold expected values
2022     BitMap expected_region_bm(_region_bm.size(), true);
2023     BitMap expected_card_bm(_card_bm.size(), true);
2024 
2025     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2026                                                  &_region_bm,
2027                                                  &_card_bm,
2028                                                  &expected_region_bm,
2029                                                  &expected_card_bm);
2030 
2031     if (G1CollectedHeap::use_parallel_gc_threads()) {
2032       g1h->set_par_threads((int)n_workers);
2033       g1h->workers()->run_task(&g1_par_verify_task);
2034       // Done with the parallel phase so reset to 0.
2035       g1h->set_par_threads(0);
2036 
2037       assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2038              "sanity check");
2039     } else {
2040       g1_par_verify_task.work(0);
2041     }
2042 
2043     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2044   }
2045 
2046   size_t start_used_bytes = g1h->used();
2047   g1h->set_marking_complete();
2048 
2049   double count_end = os::elapsedTime();
2050   double this_final_counting_time = (count_end - start);
2051   _total_counting_time += this_final_counting_time;
2052 
2053   if (G1PrintRegionLivenessInfo) {
2054     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2055     _g1h->heap_region_iterate(&cl);
2056   }
2057 
2058   // Install newly created mark bitMap as "prev".
2059   swapMarkBitMaps();
2060 
2061   g1h->reset_gc_time_stamp();
2062 
2063   // Note end of marking in all heap regions.
2064   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2065   if (G1CollectedHeap::use_parallel_gc_threads()) {
2066     g1h->set_par_threads((int)n_workers);
2067     g1h->workers()->run_task(&g1_par_note_end_task);
2068     g1h->set_par_threads(0);
2069 
2070     assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2071            "sanity check");
2072   } else {
2073     g1_par_note_end_task.work(0);
2074   }
2075   g1h->check_gc_time_stamps();
2076 
2077   if (!cleanup_list_is_empty()) {
2078     // The cleanup list is not empty, so we'll have to process it
2079     // concurrently. Notify anyone else that might be wanting free
2080     // regions that there will be more free regions coming soon.
2081     g1h->set_free_regions_coming();
2082   }
2083 
  // Do the remembered set scrubbing before the
  // record_concurrent_mark_cleanup_end() call below, since it affects
  // the metric by which we sort the heap regions.
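  // Broadly, scrubbing removes, from each region's remembered set,
  // entries for cards and regions that the liveness bitmaps just
  // computed show to contain no live data.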
2086   if (G1ScrubRemSets) {
2087     double rs_scrub_start = os::elapsedTime();
2088     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2089     if (G1CollectedHeap::use_parallel_gc_threads()) {
2090       g1h->set_par_threads((int)n_workers);
2091       g1h->workers()->run_task(&g1_par_scrub_rs_task);
2092       g1h->set_par_threads(0);
2093 
2094       assert(g1h->check_heap_region_claim_values(
2095                                             HeapRegion::ScrubRemSetClaimValue),
2096              "sanity check");
2097     } else {
2098       g1_par_scrub_rs_task.work(0);
2099     }
2100 
2101     double rs_scrub_end = os::elapsedTime();
2102     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2103     _total_rs_scrub_time += this_rs_scrub_time;
2104   }
2105 
2106   // this will also free any regions totally full of garbage objects,
2107   // and sort the regions.
2108   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2109 
2110   // Statistics.
2111   double end = os::elapsedTime();
2112   _cleanup_times.add((end - start) * 1000.0);
2113 
2114   if (G1Log::fine()) {
2115     g1h->print_size_transition(gclog_or_tty,
2116                                start_used_bytes,
2117                                g1h->used(),
2118                                g1h->capacity());
2119   }
2120 
2121   // Clean up will have freed any regions completely full of garbage.
2122   // Update the soft reference policy with the new heap occupancy.
2123   Universe::update_heap_info_at_gc();
2124 
2125   // We need to make this be a "collection" so any collection pause that
2126   // races with it goes around and waits for completeCleanup to finish.
2127   g1h->increment_total_collections();
2128 
2129   // We reclaimed old regions so we should calculate the sizes to make
2130   // sure we update the old gen/space data.
2131   g1h->g1mm()->update_sizes();
2132 
2133   if (VerifyDuringGC) {
2134     HandleMark hm;  // handle scope
2135     Universe::heap()->prepare_for_verify();
2136     Universe::verify(VerifyOption_G1UsePrevMarking,
2137                      " VerifyDuringGC:(after)");
2138   }
2139   g1h->check_bitmaps("Cleanup End");
2140 
2141   g1h->verify_region_sets_optional();
2142   g1h->trace_heap_after_concurrent_cycle();
2143 }
2144 
2145 void ConcurrentMark::completeCleanup() {
2146   if (has_aborted()) return;
2147 
2148   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2149 
2150   _cleanup_list.verify_optional();
2151   FreeRegionList tmp_free_list("Tmp Free List");
2152 
2153   if (G1ConcRegionFreeingVerbose) {
2154     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2155                            "cleanup list has %u entries",
2156                            _cleanup_list.length());
2157   }
2158 
  // No one else should be accessing the _cleanup_list at this point,
  // so it's not necessary to take any locks.
2161   while (!_cleanup_list.is_empty()) {
2162     HeapRegion* hr = _cleanup_list.remove_head();
2163     assert(hr != NULL, "Got NULL from a non-empty list");
2164     hr->par_clear();
2165     tmp_free_list.add_ordered(hr);
2166 
2167     // Instead of adding one region at a time to the secondary_free_list,
2168     // we accumulate them in the local list and move them a few at a
2169     // time. This also cuts down on the number of notify_all() calls
2170     // we do during this process. We'll also append the local list when
2171     // _cleanup_list is empty (which means we just removed the last
2172     // region from the _cleanup_list).
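    // For example, with G1SecondaryFreeListAppendLength at its default
    // value of 5 (illustrative - it is a tunable), regions are handed
    // over in batches of five, plus one final, possibly shorter, batch
    // when the cleanup list empties.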
2173     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2174         _cleanup_list.is_empty()) {
2175       if (G1ConcRegionFreeingVerbose) {
2176         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2177                                "appending %u entries to the secondary_free_list, "
2178                                "cleanup list still has %u entries",
2179                                tmp_free_list.length(),
2180                                _cleanup_list.length());
2181       }
2182 
2183       {
2184         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2185         g1h->secondary_free_list_add(&tmp_free_list);
2186         SecondaryFreeList_lock->notify_all();
2187       }
2188 
2189       if (G1StressConcRegionFreeing) {
2190         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2191           os::sleep(Thread::current(), (jlong) 1, false);
2192         }
2193       }
2194     }
2195   }
2196   assert(tmp_free_list.is_empty(), "post-condition");
2197 }
2198 
// Supporting Object and Oop closures for reference discovery
// and processing during marking.
2201 
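// An object is considered alive if it lies outside the G1 reserved
// heap (such objects are not covered by this marking cycle) or if it
// is not "ill" with respect to the current marking, i.e. (broadly) it
// is either marked or was allocated since marking started.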
2202 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2203   HeapWord* addr = (HeapWord*)obj;
2204   return addr != NULL &&
2205          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2206 }
2207 
// 'Keep Alive' oop closure used by both serial and parallel reference
// processing. Uses the CMTask associated with a worker thread (for
// serial reference processing the CMTask for worker 0 is used) to
// preserve (mark) and trace referent objects.
2212 //
// Using the CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also, using the tasks' local queues removes the potential
// for the workers to interfere with each other, which could occur if
// they operated on the global stack.
2219 
2220 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2221   ConcurrentMark* _cm;
2222   CMTask*         _task;
2223   int             _ref_counter_limit;
2224   int             _ref_counter;
2225   bool            _is_serial;
2226  public:
  G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task),
    _ref_counter_limit(G1RefProcDrainInterval), _is_serial(is_serial) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }
2234 
2235   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2236   virtual void do_oop(      oop* p) { do_oop_work(p); }
2237 
2238   template <class T> void do_oop_work(T* p) {
2239     if (!_cm->has_overflown()) {
2240       oop obj = oopDesc::load_decode_heap_oop(p);
2241       if (_cm->verbose_high()) {
2242         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2243                                "*"PTR_FORMAT" = "PTR_FORMAT,
2244                                _task->worker_id(), p, (void*) obj);
2245       }
2246 
2247       _task->deal_with_reference(obj);
2248       _ref_counter--;
2249 
2250       if (_ref_counter == 0) {
2251         // We have dealt with _ref_counter_limit references, pushing them
2252         // and objects reachable from them on to the local stack (and
2253         // possibly the global stack). Call CMTask::do_marking_step() to
2254         // process these entries.
2255         //
2256         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2257         // there's nothing more to do (i.e. we're done with the entries that
2258         // were pushed as a result of the CMTask::deal_with_reference() calls
2259         // above) or we overflow.
2260         //
2261         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2262         // flag while there may still be some work to do. (See the comment at
2263         // the beginning of CMTask::do_marking_step() for those conditions -
2264         // one of which is reaching the specified time target.) It is only
2265         // when CMTask::do_marking_step() returns without setting the
2266         // has_aborted() flag that the marking step has completed.
2267         do {
2268           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2269           _task->do_marking_step(mark_step_duration_ms,
2270                                  false      /* do_termination */,
2271                                  _is_serial);
2272         } while (_task->has_aborted() && !_cm->has_overflown());
2273         _ref_counter = _ref_counter_limit;
2274       }
2275     } else {
2276       if (_cm->verbose_high()) {
2277          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2278       }
2279     }
2280   }
2281 };
2282 
2283 // 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the CMTask associated with a given worker thread (for serial
// reference processing the CMTask for worker 0 is used). Calls the
2286 // do_marking_step routine, with an unbelievably large timeout value,
2287 // to drain the marking data structures of the remaining entries
2288 // added by the 'keep alive' oop closure above.
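// (The timeout used below is 1.0e9 ms, i.e. roughly eleven and a half
// days, so in practice the drain never aborts on the time target.)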
2289 
2290 class G1CMDrainMarkingStackClosure: public VoidClosure {
2291   ConcurrentMark* _cm;
2292   CMTask*         _task;
2293   bool            _is_serial;
2294  public:
2295   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2296     _cm(cm), _task(task), _is_serial(is_serial) {
2297     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2298   }
2299 
2300   void do_void() {
2301     do {
2302       if (_cm->verbose_high()) {
2303         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2304                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2305       }
2306 
2307       // We call CMTask::do_marking_step() to completely drain the local
2308       // and global marking stacks of entries pushed by the 'keep alive'
2309       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2310       //
2311       // CMTask::do_marking_step() is called in a loop, which we'll exit
2312       // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
2314       // closure to the entries on the discovered ref lists) or we overflow
2315       // the global marking stack.
2316       //
2317       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2318       // flag while there may still be some work to do. (See the comment at
2319       // the beginning of CMTask::do_marking_step() for those conditions -
2320       // one of which is reaching the specified time target.) It is only
2321       // when CMTask::do_marking_step() returns without setting the
2322       // has_aborted() flag that the marking step has completed.
2323 
2324       _task->do_marking_step(1000000000.0 /* something very large */,
2325                              true         /* do_termination */,
2326                              _is_serial);
2327     } while (_task->has_aborted() && !_cm->has_overflown());
2328   }
2329 };
2330 
2331 // Implementation of AbstractRefProcTaskExecutor for parallel
2332 // reference processing at the end of G1 concurrent marking
2333 
2334 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2335 private:
2336   G1CollectedHeap* _g1h;
2337   ConcurrentMark*  _cm;
2338   WorkGang*        _workers;
2339   int              _active_workers;
2340 
2341 public:
2342   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2343                         ConcurrentMark* cm,
2344                         WorkGang* workers,
2345                         int n_workers) :
2346     _g1h(g1h), _cm(cm),
2347     _workers(workers), _active_workers(n_workers) { }
2348 
2349   // Executes the given task using concurrent marking worker threads.
2350   virtual void execute(ProcessTask& task);
2351   virtual void execute(EnqueueTask& task);
2352 };
2353 
2354 class G1CMRefProcTaskProxy: public AbstractGangTask {
2355   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2356   ProcessTask&     _proc_task;
2357   G1CollectedHeap* _g1h;
2358   ConcurrentMark*  _cm;
2359 
2360 public:
2361   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2362                      G1CollectedHeap* g1h,
2363                      ConcurrentMark* cm) :
2364     AbstractGangTask("Process reference objects in parallel"),
2365     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2366     ReferenceProcessor* rp = _g1h->ref_processor_cm();
2367     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2368   }
2369 
2370   virtual void work(uint worker_id) {
2371     CMTask* task = _cm->task(worker_id);
2372     G1CMIsAliveClosure g1_is_alive(_g1h);
2373     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2374     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2375 
2376     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2377   }
2378 };
2379 
2380 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2381   assert(_workers != NULL, "Need parallel worker threads.");
2382   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2383 
2384   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2385 
2386   // We need to reset the concurrency level before each
2387   // proxy task execution, so that the termination protocol
2388   // and overflow handling in CMTask::do_marking_step() knows
2389   // how many workers to wait for.
2390   _cm->set_concurrency(_active_workers);
2391   _g1h->set_par_threads(_active_workers);
2392   _workers->run_task(&proc_task_proxy);
2393   _g1h->set_par_threads(0);
2394 }
2395 
2396 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2397   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2398   EnqueueTask& _enq_task;
2399 
2400 public:
2401   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2402     AbstractGangTask("Enqueue reference objects in parallel"),
2403     _enq_task(enq_task) { }
2404 
2405   virtual void work(uint worker_id) {
2406     _enq_task.work(worker_id);
2407   }
2408 };
2409 
2410 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2411   assert(_workers != NULL, "Need parallel worker threads.");
2412   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2413 
2414   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2415 
2416   // Not strictly necessary but...
2417   //
2418   // We need to reset the concurrency level before each
2419   // proxy task execution, so that the termination protocol
2420   // and overflow handling in CMTask::do_marking_step() knows
2421   // how many workers to wait for.
2422   _cm->set_concurrency(_active_workers);
2423   _g1h->set_par_threads(_active_workers);
2424   _workers->run_task(&enq_task_proxy);
2425   _g1h->set_par_threads(0);
2426 }
2427 
2428 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2429   if (has_overflown()) {
2430     // Skip processing the discovered references if we have
2431     // overflown the global marking stack. Reference objects
2432     // only get discovered once so it is OK to not
2433     // de-populate the discovered reference lists. We could have,
2434     // but the only benefit would be that, when marking restarts,
2435     // less reference objects are discovered.
2436     return;
2437   }
2438 
2439   ResourceMark rm;
2440   HandleMark   hm;
2441 
2442   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2443 
2444   // Is alive closure.
2445   G1CMIsAliveClosure g1_is_alive(g1h);
2446 
2447   // Inner scope to exclude the cleaning of the string and symbol
2448   // tables from the displayed time.
2449   {
2450     if (G1Log::finer()) {
2451       gclog_or_tty->put(' ');
2452     }
2453     GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
2454 
2455     ReferenceProcessor* rp = g1h->ref_processor_cm();
2456 
2457     // See the comment in G1CollectedHeap::ref_processing_init()
2458     // about how reference processing currently works in G1.
2459 
2460     // Set the soft reference policy
2461     rp->setup_policy(clear_all_soft_refs);
2462     assert(_markStack.isEmpty(), "mark stack should be empty");
2463 
2464     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2465     // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
2467     // JNI references during parallel reference processing.
2468     //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
    // when reference processing is not multi-threaded it is
    // performed by the current thread instead of a gang worker).
2474     //
2475     // The gang tasks involved in parallel reference processing create
2476     // their own instances of these closures, which do their own
2477     // synchronization among themselves.
2478     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2479     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2480 
2481     // We need at least one active thread. If reference processing
2482     // is not multi-threaded we use the current (VMThread) thread,
2483     // otherwise we use the work gang from the G1CollectedHeap and
2484     // we utilize all the worker threads we can.
2485     bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2486     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
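    // Clamp to the range [1, _max_worker_id]: we cannot use more
    // workers than there are CMTasks, and we need at least one.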
2487     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2488 
2489     // Parallel processing task executor.
2490     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2491                                               g1h->workers(), active_workers);
2492     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2493 
2494     // Set the concurrency level. The phase was already set prior to
2495     // executing the remark task.
2496     set_concurrency(active_workers);
2497 
2498     // Set the degree of MT processing here.  If the discovery was done MT,
2499     // the number of threads involved during discovery could differ from
2500     // the number of active workers.  This is OK as long as the discovered
2501     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2502     rp->set_active_mt_degree(active_workers);
2503 
2504     // Process the weak references.
2505     const ReferenceProcessorStats& stats =
2506         rp->process_discovered_references(&g1_is_alive,
2507                                           &g1_keep_alive,
2508                                           &g1_drain_mark_stack,
2509                                           executor,
2510                                           g1h->gc_timer_cm());
2511     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2512 
2513     // The do_oop work routines of the keep_alive and drain_marking_stack
2514     // oop closures will set the has_overflown flag if we overflow the
2515     // global marking stack.
2516 
2517     assert(_markStack.overflow() || _markStack.isEmpty(),
2518             "mark stack should be empty (unless it overflowed)");
2519 
2520     if (_markStack.overflow()) {
2521       // This should have been done already when we tried to push an
2522       // entry on to the global mark stack. But let's do it again.
2523       set_has_overflown();
2524     }
2525 
2526     assert(rp->num_q() == active_workers, "why not");
2527 
2528     rp->enqueue_discovered_references(executor);
2529 
2530     rp->verify_no_references_recorded();
2531     assert(!rp->discovery_enabled(), "Post condition");
2532   }
2533 
2534   if (has_overflown()) {
2535     // We can not trust g1_is_alive if the marking stack overflowed
2536     return;
2537   }
2538 
2539   g1h->unlink_string_and_symbol_table(&g1_is_alive,
2540                                       /* process_strings */ false, // currently strings are always roots
2541                                       /* process_symbols */ true);
2542 }
2543 
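// Swap the roles of the two marking bitmaps: the bitmap populated by
// the cycle that has just completed becomes the "prev" (stable) bitmap,
// and the old "prev" bitmap will be cleared and reused as "next" by the
// following cycle.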
2544 void ConcurrentMark::swapMarkBitMaps() {
2545   CMBitMapRO* temp = _prevMarkBitMap;
2546   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2547   _nextMarkBitMap  = (CMBitMap*)  temp;
2548 }
2549 
2550 class CMRemarkTask: public AbstractGangTask {
2551 private:
2552   ConcurrentMark* _cm;
2553   bool            _is_serial;
2554 public:
2555   void work(uint worker_id) {
2556     // Since all available tasks are actually started, we should
2557     // only proceed if we're supposed to be active.
2558     if (worker_id < _cm->active_tasks()) {
2559       CMTask* task = _cm->task(worker_id);
2560       task->record_start_time();
2561       do {
2562         task->do_marking_step(1000000000.0 /* something very large */,
2563                               true         /* do_termination       */,
2564                               _is_serial);
2565       } while (task->has_aborted() && !_cm->has_overflown());
2566       // If we overflow, then we do not want to restart. We instead
2567       // want to abort remark and do concurrent marking again.
2568       task->record_end_time();
2569     }
2570   }
2571 
2572   CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2573     AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2574     _cm->terminator()->reset_for_reuse(active_workers);
2575   }
2576 };
2577 
2578 void ConcurrentMark::checkpointRootsFinalWork() {
2579   ResourceMark rm;
2580   HandleMark   hm;
2581   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2582 
2583   g1h->ensure_parsability(false);
2584 
2585   if (G1CollectedHeap::use_parallel_gc_threads()) {
2586     G1CollectedHeap::StrongRootsScope srs(g1h);
2587     // this is remark, so we'll use up all active threads
2588     uint active_workers = g1h->workers()->active_workers();
2589     if (active_workers == 0) {
2590       assert(active_workers > 0, "Should have been set earlier");
2591       active_workers = (uint) ParallelGCThreads;
2592       g1h->workers()->set_active_workers(active_workers);
2593     }
2594     set_concurrency_and_phase(active_workers, false /* concurrent */);
    // Leave _parallel_marking_threads at its
2596     // value originally calculated in the ConcurrentMark
2597     // constructor and pass values of the active workers
2598     // through the gang in the task.
2599 
2600     CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2601     // We will start all available threads, even if we decide that the
2602     // active_workers will be fewer. The extra ones will just bail out
2603     // immediately.
2604     g1h->set_par_threads(active_workers);
2605     g1h->workers()->run_task(&remarkTask);
2606     g1h->set_par_threads(0);
2607   } else {
2608     G1CollectedHeap::StrongRootsScope srs(g1h);
2609     uint active_workers = 1;
2610     set_concurrency_and_phase(active_workers, false /* concurrent */);
2611 
2612     // Note - if there's no work gang then the VMThread will be
2613     // the thread to execute the remark - serially. We have
2614     // to pass true for the is_serial parameter so that
2615     // CMTask::do_marking_step() doesn't enter the sync
2616     // barriers in the event of an overflow. Doing so will
2617     // cause an assert that the current thread is not a
2618     // concurrent GC thread.
    CMRemarkTask remarkTask(this, active_workers, true /* is_serial */);
2620     remarkTask.work(0);
2621   }
2622   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2623   guarantee(has_overflown() ||
2624             satb_mq_set.completed_buffers_num() == 0,
2625             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2626                     BOOL_TO_STR(has_overflown()),
2627                     satb_mq_set.completed_buffers_num()));
2628 
2629   print_stats();
2630 }
2631 
2632 #ifndef PRODUCT
2633 
2634 class PrintReachableOopClosure: public OopClosure {
2635 private:
2636   G1CollectedHeap* _g1h;
2637   outputStream*    _out;
2638   VerifyOption     _vo;
2639   bool             _all;
2640 
2641 public:
2642   PrintReachableOopClosure(outputStream* out,
2643                            VerifyOption  vo,
2644                            bool          all) :
2645     _g1h(G1CollectedHeap::heap()),
2646     _out(out), _vo(vo), _all(all) { }
2647 
2648   void do_oop(narrowOop* p) { do_oop_work(p); }
2649   void do_oop(      oop* p) { do_oop_work(p); }
2650 
2651   template <class T> void do_oop_work(T* p) {
2652     oop         obj = oopDesc::load_decode_heap_oop(p);
2653     const char* str = NULL;
2654     const char* str2 = "";
2655 
2656     if (obj == NULL) {
2657       str = "";
2658     } else if (!_g1h->is_in_g1_reserved(obj)) {
2659       str = " O";
2660     } else {
2661       HeapRegion* hr  = _g1h->heap_region_containing(obj);
2662       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2663       bool marked = _g1h->is_marked(obj, _vo);
2664 
2665       if (over_tams) {
2666         str = " >";
2667         if (marked) {
2668           str2 = " AND MARKED";
2669         }
2670       } else if (marked) {
2671         str = " M";
2672       } else {
2673         str = " NOT";
2674       }
2675     }
2676 
2677     _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
2678                    p, (void*) obj, str, str2);
2679   }
2680 };
2681 
2682 class PrintReachableObjectClosure : public ObjectClosure {
2683 private:
2684   G1CollectedHeap* _g1h;
2685   outputStream*    _out;
2686   VerifyOption     _vo;
2687   bool             _all;
2688   HeapRegion*      _hr;
2689 
2690 public:
2691   PrintReachableObjectClosure(outputStream* out,
2692                               VerifyOption  vo,
2693                               bool          all,
2694                               HeapRegion*   hr) :
2695     _g1h(G1CollectedHeap::heap()),
2696     _out(out), _vo(vo), _all(all), _hr(hr) { }
2697 
2698   void do_object(oop o) {
2699     bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2700     bool marked = _g1h->is_marked(o, _vo);
2701     bool print_it = _all || over_tams || marked;
2702 
2703     if (print_it) {
2704       _out->print_cr(" "PTR_FORMAT"%s",
2705                      (void *)o, (over_tams) ? " >" : (marked) ? " M" : "");
2706       PrintReachableOopClosure oopCl(_out, _vo, _all);
2707       o->oop_iterate_no_header(&oopCl);
2708     }
2709   }
2710 };
2711 
2712 class PrintReachableRegionClosure : public HeapRegionClosure {
2713 private:
2714   G1CollectedHeap* _g1h;
2715   outputStream*    _out;
2716   VerifyOption     _vo;
2717   bool             _all;
2718 
2719 public:
2720   bool doHeapRegion(HeapRegion* hr) {
2721     HeapWord* b = hr->bottom();
2722     HeapWord* e = hr->end();
2723     HeapWord* t = hr->top();
2724     HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2725     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2726                    "TAMS: "PTR_FORMAT, b, e, t, p);
2727     _out->cr();
2728 
2729     HeapWord* from = b;
2730     HeapWord* to   = t;
2731 
2732     if (to > from) {
2733       _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
2734       _out->cr();
2735       PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2736       hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2737       _out->cr();
2738     }
2739 
2740     return false;
2741   }
2742 
2743   PrintReachableRegionClosure(outputStream* out,
2744                               VerifyOption  vo,
2745                               bool          all) :
2746     _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2747 };
2748 
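// Dumps reachability information for the heap to the file
// "<G1PrintReachableBaseFile>.<str>"; G1PrintReachableBaseFile must be
// set on the command line for the dump to be produced.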
2749 void ConcurrentMark::print_reachable(const char* str,
2750                                      VerifyOption vo,
2751                                      bool all) {
2752   gclog_or_tty->cr();
2753   gclog_or_tty->print_cr("== Doing heap dump... ");
2754 
2755   if (G1PrintReachableBaseFile == NULL) {
2756     gclog_or_tty->print_cr("  #### error: no base file defined");
2757     return;
2758   }
2759 
2760   if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2761       (JVM_MAXPATHLEN - 1)) {
2762     gclog_or_tty->print_cr("  #### error: file name too long");
2763     return;
2764   }
2765 
2766   char file_name[JVM_MAXPATHLEN];
2767   sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2768   gclog_or_tty->print_cr("  dumping to file %s", file_name);
2769 
2770   fileStream fout(file_name);
2771   if (!fout.is_open()) {
2772     gclog_or_tty->print_cr("  #### error: could not open file");
2773     return;
2774   }
2775 
2776   outputStream* out = &fout;
2777   out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2778   out->cr();
2779 
2780   out->print_cr("--- ITERATING OVER REGIONS");
2781   out->cr();
2782   PrintReachableRegionClosure rcl(out, vo, all);
2783   _g1h->heap_region_iterate(&rcl);
2784   out->cr();
2785 
2786   gclog_or_tty->print_cr("  done");
2787   gclog_or_tty->flush();
2788 }
2789 
2790 #endif // PRODUCT
2791 
2792 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2793   // Note we are overriding the read-only view of the prev map here, via
2794   // the cast.
2795   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2796 }
2797 
2798 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2799   _nextMarkBitMap->clearRange(mr);
2800 }
2801 
2802 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
2803   clearRangePrevBitmap(mr);
2804   clearRangeNextBitmap(mr);
2805 }
2806 
2807 HeapRegion*
2808 ConcurrentMark::claim_region(uint worker_id) {
2809   // "checkpoint" the finger
2810   HeapWord* finger = _finger;
2811 
2812   // _heap_end will not change underneath our feet; it only changes at
2813   // yield points.
2814   while (finger < _heap_end) {
2815     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2816 
2817     // Note on how this code handles humongous regions. In the
2818     // normal case the finger will reach the start of a "starts
2819     // humongous" (SH) region. Its end will either be the end of the
2820     // last "continues humongous" (CH) region in the sequence, or the
2821     // standard end of the SH region (if the SH is the only region in
2822     // the sequence). That way claim_region() will skip over the CH
2823     // regions. However, there is a subtle race between a CM thread
2824     // executing this method and a mutator thread doing a humongous
2825     // object allocation. The two are not mutually exclusive as the CM
2826     // thread does not need to hold the Heap_lock when it gets
2827     // here. So there is a chance that claim_region() will come across
2828     // a free region that's in the process of becoming a SH or a CH
2829     // region. In the former case, it will either
2830     //   a) miss the update to the region's end, in which case it will
2831     //      visit every subsequent CH region, find their bitmaps
2832     //      empty, and do nothing, or
2833     //   b) observe the update of the region's end (in which case
2834     //      it will skip the subsequent CH regions).
2835     // If it comes across a region that suddenly becomes CH, the
2836     // scenario will be similar to b). So, the race between
2837     // claim_region() and a humongous object allocation might force us
2838     // to do a bit of unnecessary work (due to some unnecessary bitmap
2839     // iterations) but it should not introduce any correctness issues.
2840     HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
2841     HeapWord*   bottom        = curr_region->bottom();
2842     HeapWord*   end           = curr_region->end();
2843     HeapWord*   limit         = curr_region->next_top_at_mark_start();
2844 
2845     if (verbose_low()) {
2846       gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2847                              "["PTR_FORMAT", "PTR_FORMAT"), "
2848                              "limit = "PTR_FORMAT,
2849                              worker_id, curr_region, bottom, end, limit);
2850     }
2851 
2852     // Is the gap between reading the finger and doing the CAS too long?
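         // (Note on the primitive: Atomic::cmpxchg_ptr(end, &_finger,
         // finger) atomically installs end at &_finger only if _finger
         // still holds the value finger read above, and it returns the
         // previous contents - so res == finger below means our claim
         // succeeded.)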
2853     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2854     if (res == finger) {
2855       // we succeeded
2856 
2857       // notice that _finger == end cannot be guaranteed here since
2858       // someone else might have moved the finger even further
2859       assert(_finger >= end, "the finger should have moved forward");
2860 
2861       if (verbose_low()) {
2862         gclog_or_tty->print_cr("[%u] we were successful with region = "
2863                                PTR_FORMAT, worker_id, curr_region);
2864       }
2865 
2866       if (limit > bottom) {
2867         if (verbose_low()) {
2868           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2869                                  "returning it ", worker_id, curr_region);
2870         }
2871         return curr_region;
2872       } else {
2873         assert(limit == bottom,
2874                "the region limit should be at bottom");
2875         if (verbose_low()) {
2876           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2877                                  "returning NULL", worker_id, curr_region);
2878         }
2879         // we return NULL and the caller should try calling
2880         // claim_region() again.
2881         return NULL;
2882       }
2883     } else {
2884       assert(_finger > finger, "the finger should have moved forward");
2885       if (verbose_low()) {
2886         gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2887                                "global finger = "PTR_FORMAT", "
2888                                "our finger = "PTR_FORMAT,
2889                                worker_id, _finger, finger);
2890       }
2891 
2892       // read it again
2893       finger = _finger;
2894     }
2895   }
2896 
2897   return NULL;
2898 }
2899 
2900 #ifndef PRODUCT
2901 enum VerifyNoCSetOopsPhase {
2902   VerifyNoCSetOopsStack,
2903   VerifyNoCSetOopsQueues,
2904   VerifyNoCSetOopsSATBCompleted,
2905   VerifyNoCSetOopsSATBThread
2906 };
2907 
2908 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
2909 private:
2910   G1CollectedHeap* _g1h;
2911   VerifyNoCSetOopsPhase _phase;
2912   int _info;
2913 
2914   const char* phase_str() {
2915     switch (_phase) {
2916     case VerifyNoCSetOopsStack:         return "Stack";
2917     case VerifyNoCSetOopsQueues:        return "Queue";
2918     case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
2919     case VerifyNoCSetOopsSATBThread:    return "Thread SATB Buffers";
2920     default:                            ShouldNotReachHere();
2921     }
2922     return NULL;
2923   }
2924 
2925   void do_object_work(oop obj) {
2926     guarantee(!_g1h->obj_in_cs(obj),
2927               err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
2928                       (void*) obj, phase_str(), _info));
2929   }
2930 
2931 public:
2932   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2933 
2934   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
2935     _phase = phase;
2936     _info = info;
2937   }
2938 
2939   virtual void do_oop(oop* p) {
2940     oop obj = oopDesc::load_decode_heap_oop(p);
2941     do_object_work(obj);
2942   }
2943 
2944   virtual void do_oop(narrowOop* p) {
2945     // We should not come across narrow oops while scanning marking
2946     // stacks and SATB buffers.
2947     ShouldNotReachHere();
2948   }
2949 
2950   virtual void do_object(oop obj) {
2951     do_object_work(obj);
2952   }
2953 };
2954 
2955 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
2956                                          bool verify_enqueued_buffers,
2957                                          bool verify_thread_buffers,
2958                                          bool verify_fingers) {
2959   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2960   if (!G1CollectedHeap::heap()->mark_in_progress()) {
2961     return;
2962   }
2963 
2964   VerifyNoCSetOopsClosure cl;
2965 
2966   if (verify_stacks) {
2967     // Verify entries on the global mark stack
2968     cl.set_phase(VerifyNoCSetOopsStack);
2969     _markStack.oops_do(&cl);
2970 
2971     // Verify entries on the task queues
2972     for (uint i = 0; i < _max_worker_id; i += 1) {
2973       cl.set_phase(VerifyNoCSetOopsQueues, i);
2974       CMTaskQueue* queue = _task_queues->queue(i);
2975       queue->oops_do(&cl);
2976     }
2977   }
2978 
2979   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
2980 
2981   // Verify entries on the enqueued SATB buffers
2982   if (verify_enqueued_buffers) {
2983     cl.set_phase(VerifyNoCSetOopsSATBCompleted);
2984     satb_qs.iterate_completed_buffers_read_only(&cl);
2985   }
2986 
2987   // Verify entries on the per-thread SATB buffers
2988   if (verify_thread_buffers) {
2989     cl.set_phase(VerifyNoCSetOopsSATBThread);
2990     satb_qs.iterate_thread_buffers_read_only(&cl);
2991   }
2992 
2993   if (verify_fingers) {
2994     // Verify the global finger
2995     HeapWord* global_finger = finger();
2996     if (global_finger != NULL && global_finger < _heap_end) {
2997       // The global finger always points to a heap region boundary. We
2998       // use heap_region_containing_raw() to get the containing region
2999       // given that the global finger could be pointing to a free region
3000       // which subsequently becomes continues humongous. If that
3001       // happens, heap_region_containing() will return the
3002       // corresponding starts humongous region and the check below will
3003       // no longer hold.
3004       HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
3005       guarantee(global_finger == global_hr->bottom(),
3006                 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
3007                         global_finger, HR_FORMAT_PARAMS(global_hr)));
3008     }
3009 
3010     // Verify the task fingers
3011     assert(parallel_marking_threads() <= _max_worker_id, "sanity");
3012     for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
3013       CMTask* task = _tasks[i];
3014       HeapWord* task_finger = task->finger();
3015       if (task_finger != NULL && task_finger < _heap_end) {
3016         // See above note on the global finger verification.
3017         HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3018         guarantee(task_finger == task_hr->bottom() ||
3019                   !task_hr->in_collection_set(),
3020                   err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
3021                           task_finger, HR_FORMAT_PARAMS(task_hr)));
3022       }
3023     }
3024   }
3025 }
3026 #endif // PRODUCT
3027 
3028 // Aggregate the counting data that was constructed concurrently
3029 // with marking.
3030 class AggregateCountDataHRClosure: public HeapRegionClosure {
3031   G1CollectedHeap* _g1h;
3032   ConcurrentMark* _cm;
3033   CardTableModRefBS* _ct_bs;
3034   BitMap* _cm_card_bm;
3035   uint _max_worker_id;
3036 
3037  public:
3038   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3039                               BitMap* cm_card_bm,
3040                               uint max_worker_id) :
3041     _g1h(g1h), _cm(g1h->concurrent_mark()),
3042     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3043     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3044 
3045   bool doHeapRegion(HeapRegion* hr) {
3046     if (hr->continuesHumongous()) {
3047       // We will ignore these here and process them when their
3048       // associated "starts humongous" region is processed.
3049       // Note that we cannot rely on their associated
3050       // "starts humongous" region to have its bit set to 1
3051       // since, due to the region chunking in the parallel region
3052       // iteration, a "continues humongous" region might be visited
3053       // before its associated "starts humongous".
3054       return false;
3055     }
3056 
3057     HeapWord* start = hr->bottom();
3058     HeapWord* limit = hr->next_top_at_mark_start();
3059     HeapWord* end = hr->end();
3060 
3061     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3062            err_msg("Preconditions not met - "
3063                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3064                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
3065                    start, limit, hr->top(), hr->end()));
3066 
3067     assert(hr->next_marked_bytes() == 0, "Precondition");
3068 
3069     if (start == limit) {
3070       // NTAMS of this region has not been set so nothing to do.
3071       return false;
3072     }
3073 
3074     // 'start' should be in the heap.
3075     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3076     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3077     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3078 
3079     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3080     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3081     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3082 
3083     // If ntams is not card aligned then we bump the card bitmap index
3084     // for limit so that we get all the cards spanned by
3085     // the object ending at ntams.
3086     // Note: if this is the last region in the heap then ntams
3087     // could actually be just beyond the end of the heap;
3088     // limit_idx will then correspond to a (non-existent) card
3089     // that is also outside the heap.
3090     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3091       limit_idx += 1;
3092     }
3093 
3094     assert(limit_idx <= end_idx, "or else use atomics");
3095 
3096     // Aggregate the "stripe" in the count data associated with hr.
3097     uint hrs_index = hr->hrs_index();
3098     size_t marked_bytes = 0;
3099 
3100     for (uint i = 0; i < _max_worker_id; i += 1) {
3101       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3102       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3103 
3104       // Fetch the marked_bytes in this region for task i and
3105       // add it to the running total for this region.
3106       marked_bytes += marked_bytes_array[hrs_index];
3107 
3108       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3109       // into the global card bitmap.
3110       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3111 
3112       while (scan_idx < limit_idx) {
3113         assert(task_card_bm->at(scan_idx) == true, "should be");
3114         _cm_card_bm->set_bit(scan_idx);
3115         assert(_cm_card_bm->at(scan_idx) == true, "should be");
3116 
3117         // BitMap::get_next_one_offset() can handle the case when
3118         // its left_offset parameter is greater than its right_offset
3119         // parameter. It does, however, have an early exit if
3120         // left_offset == right_offset. So let's limit the value
3121         // passed in for left offset here.
3122         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3123         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3124       }
3125     }
3126 
3127     // Update the marked bytes for this region.
3128     hr->add_to_marked_bytes(marked_bytes);
3129 
3130     // Next heap region
3131     return false;
3132   }
3133 };
3134 
3135 class G1AggregateCountDataTask: public AbstractGangTask {
3136 protected:
3137   G1CollectedHeap* _g1h;
3138   ConcurrentMark* _cm;
3139   BitMap* _cm_card_bm;
3140   uint _max_worker_id;
3141   int _active_workers;
3142 
3143 public:
3144   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3145                            ConcurrentMark* cm,
3146                            BitMap* cm_card_bm,
3147                            uint max_worker_id,
3148                            int n_workers) :
3149     AbstractGangTask("Count Aggregation"),
3150     _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3151     _max_worker_id(max_worker_id),
3152     _active_workers(n_workers) { }
3153 
3154   void work(uint worker_id) {
3155     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3156 
3157     if (G1CollectedHeap::use_parallel_gc_threads()) {
3158       _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
3159                                             _active_workers,
3160                                             HeapRegion::AggregateCountClaimValue);
3161     } else {
3162       _g1h->heap_region_iterate(&cl);
3163     }
3164   }
3165 };
3166 
3167 
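     // Run the aggregation closure over every region, in parallel when a
     // work gang is available. The claim-value checks below act as a
     // sanity net: regions start out at InitialClaimValue, the chunked
     // parallel iteration tags each region it claims with
     // AggregateCountClaimValue (so no region is processed twice), and
     // the claim values are reset once the task has run.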
3168 void ConcurrentMark::aggregate_count_data() {
3169   int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3170                         _g1h->workers()->active_workers() :
3171                         1);
3172 
3173   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3174                                            _max_worker_id, n_workers);
3175 
3176   if (G1CollectedHeap::use_parallel_gc_threads()) {
3177     assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3178            "sanity check");
3179     _g1h->set_par_threads(n_workers);
3180     _g1h->workers()->run_task(&g1_par_agg_task);
3181     _g1h->set_par_threads(0);
3182 
3183     assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
3184            "sanity check");
3185     _g1h->reset_heap_region_claim_values();
3186   } else {
3187     g1_par_agg_task.work(0);
3188   }
3189 }
3190 
3191 // Clear the per-worker arrays used to store the per-region counting data
3192 void ConcurrentMark::clear_all_count_data() {
3193   // Clear the global card bitmap - it will be filled during
3194   // liveness count aggregation (during remark) and the
3195   // final counting task.
3196   _card_bm.clear();
3197 
3198   // Clear the global region bitmap - it will be filled as part
3199   // of the final counting task.
3200   _region_bm.clear();
3201 
3202   uint max_regions = _g1h->max_regions();
3203   assert(_max_worker_id > 0, "uninitialized");
3204 
3205   for (uint i = 0; i < _max_worker_id; i += 1) {
3206     BitMap* task_card_bm = count_card_bitmap_for(i);
3207     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3208 
3209     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3210     assert(marked_bytes_array != NULL, "uninitialized");
3211 
3212     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3213     task_card_bm->clear();
3214   }
3215 }
3216 
3217 void ConcurrentMark::print_stats() {
3218   if (verbose_stats()) {
3219     gclog_or_tty->print_cr("---------------------------------------------------------------------");
3220     for (size_t i = 0; i < _active_tasks; ++i) {
3221       _tasks[i]->print_stats();
3222       gclog_or_tty->print_cr("---------------------------------------------------------------------");
3223     }
3224   }
3225 }
3226 
3227 // abandon current marking iteration due to a Full GC
3228 void ConcurrentMark::abort() {
3229   // Clear all marks to force the marking thread to do nothing
3230   _nextMarkBitMap->clearAll();
3231 
3232   // Note we cannot clear the previous marking bitmap here
3233   // since VerifyDuringGC verifies the objects marked during
3234   // a full GC against the previous bitmap.
3235 
3236   // Clear the liveness counting data
3237   clear_all_count_data();
3238   // Empty mark stack
3239   reset_marking_state();
3240   for (uint i = 0; i < _max_worker_id; ++i) {
3241     _tasks[i]->clear_region_fields();
3242   }
3243   _has_aborted = true;
3244 
3245   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3246   satb_mq_set.abandon_partial_marking();
3247   // This can be called either during or outside marking; we'll read
3248   // the expected_active value from the SATB queue set.
3249   satb_mq_set.set_active_all_threads(
3250                                  false, /* new active value */
3251                                  satb_mq_set.is_active() /* expected_active */);
3252 
3253   _g1h->trace_heap_after_concurrent_cycle();
3254   _g1h->register_concurrent_cycle_end();
3255 }
3256 
3257 static void print_ms_time_info(const char* prefix, const char* name,
3258                                NumberSeq& ns) {
3259   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3260                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3261   if (ns.num() > 0) {
3262     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3263                            prefix, ns.sd(), ns.maximum());
3264   }
3265 }
3266 
3267 void ConcurrentMark::print_summary_info() {
3268   gclog_or_tty->print_cr(" Concurrent marking:");
3269   print_ms_time_info("  ", "init marks", _init_times);
3270   print_ms_time_info("  ", "remarks", _remark_times);
3271   {
3272     print_ms_time_info("     ", "final marks", _remark_mark_times);
3273     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3274 
3275   }
3276   print_ms_time_info("  ", "cleanups", _cleanup_times);
3277   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
3278                          _total_counting_time,
3279                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3280                           (double)_cleanup_times.num()
3281                          : 0.0));
3282   if (G1ScrubRemSets) {
3283     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3284                            _total_rs_scrub_time,
3285                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3286                             (double)_cleanup_times.num()
3287                            : 0.0));
3288   }
3289   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3290                          (_init_times.sum() + _remark_times.sum() +
3291                           _cleanup_times.sum())/1000.0);
3292   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3293                 "(%8.2f s marking).",
3294                 cmThread()->vtime_accum(),
3295                 cmThread()->vtime_mark_accum());
3296 }
3297 
3298 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3299   if (use_parallel_marking_threads()) {
3300     _parallel_workers->print_worker_threads_on(st);
3301   }
3302 }
3303 
3304 void ConcurrentMark::print_on_error(outputStream* st) const {
3305   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3306       _prevMarkBitMap, _nextMarkBitMap);
3307   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3308   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3309 }
3310 
3311 // We take a break if someone is trying to stop the world.
3312 bool ConcurrentMark::do_yield_check(uint worker_id) {
3313   if (SuspendibleThreadSet::should_yield()) {
3314     if (worker_id == 0) {
3315       _g1h->g1_policy()->record_concurrent_pause();
3316     }
3317     SuspendibleThreadSet::yield();
3318     return true;
3319   } else {
3320     return false;
3321   }
3322 }
3323 
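     // A worked example of the mapping below (assuming the usual
     // 512-byte cards, i.e. CardTableModRefBS::card_shift == 9): an
     // address 5000 bytes past the start of the reserved region lies on
     // card 5000 >> 9 == 9.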
3324 bool ConcurrentMark::containing_card_is_marked(void* p) {
3325   size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
3326   return _card_bm.at(offset >> CardTableModRefBS::card_shift);
3327 }
3328 
3329 bool ConcurrentMark::containing_cards_are_marked(void* start,
3330                                                  void* last) {
3331   return containing_card_is_marked(start) &&
3332          containing_card_is_marked(last);
3333 }
3334 
3335 #ifndef PRODUCT
3336 // for debugging purposes
3337 void ConcurrentMark::print_finger() {
3338   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3339                          _heap_start, _heap_end, _finger);
3340   for (uint i = 0; i < _max_worker_id; ++i) {
3341     gclog_or_tty->print("   %u: "PTR_FORMAT, i, _tasks[i]->finger());
3342   }
3343   gclog_or_tty->print_cr("");
3344 }
3345 #endif
3346 
3347 void CMTask::scan_object(oop obj) {
3348   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3349 
3350   if (_cm->verbose_high()) {
3351     gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3352                            _worker_id, (void*) obj);
3353   }
3354 
3355   size_t obj_size = obj->size();
3356   _words_scanned += obj_size;
3357 
3358   obj->oop_iterate(_cm_oop_closure);
3359   statsOnly( ++_objs_scanned );
3360   check_limits();
3361 }
3362 
3363 // Closure for iteration over bitmaps
3364 class CMBitMapClosure : public BitMapClosure {
3365 private:
3366   // the bitmap that is being iterated over
3367   CMBitMap*                   _nextMarkBitMap;
3368   ConcurrentMark*             _cm;
3369   CMTask*                     _task;
3370 
3371 public:
3372   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3373     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
3374 
3375   bool do_bit(size_t offset) {
3376     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3377     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3378     assert( addr < _cm->finger(), "invariant");
3379 
3380     statsOnly( _task->increase_objs_found_on_bitmap() );
3381     assert(addr >= _task->finger(), "invariant");
3382 
3383     // We move that task's local finger along.
3384     _task->move_finger_to(addr);
3385 
3386     _task->scan_object(oop(addr));
3387     // we only partially drain the local queue and global stack
3388     _task->drain_local_queue(true);
3389     _task->drain_global_stack(true);
3390 
3391     // if the has_aborted flag has been raised, we need to bail out of
3392     // the iteration
3393     return !_task->has_aborted();
3394   }
3395 };
3396 
3397 // Closure for iterating over objects, currently only used for
3398 // processing SATB buffers.
3399 class CMObjectClosure : public ObjectClosure {
3400 private:
3401   CMTask* _task;
3402 
3403 public:
3404   void do_object(oop obj) {
3405     _task->deal_with_reference(obj);
3406   }
3407 
3408   CMObjectClosure(CMTask* task) : _task(task) { }
3409 };
3410 
3411 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3412                                ConcurrentMark* cm,
3413                                CMTask* task)
3414   : _g1h(g1h), _cm(cm), _task(task) {
3415   assert(_ref_processor == NULL, "should be initialized to NULL");
3416 
3417   if (G1UseConcMarkReferenceProcessing) {
3418     _ref_processor = g1h->ref_processor_cm();
3419     assert(_ref_processor != NULL, "should not be NULL");
3420   }
3421 }
3422 
3423 void CMTask::setup_for_region(HeapRegion* hr) {
3424   assert(hr != NULL,
3425         "claim_region() should have filtered out NULL regions");
3426   assert(!hr->continuesHumongous(),
3427         "claim_region() should have filtered out continues humongous regions");
3428 
3429   if (_cm->verbose_low()) {
3430     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3431                            _worker_id, hr);
3432   }
3433 
3434   _curr_region  = hr;
3435   _finger       = hr->bottom();
3436   update_region_limit();
3437 }
3438 
3439 void CMTask::update_region_limit() {
3440   HeapRegion* hr            = _curr_region;
3441   HeapWord* bottom          = hr->bottom();
3442   HeapWord* limit           = hr->next_top_at_mark_start();
3443 
3444   if (limit == bottom) {
3445     if (_cm->verbose_low()) {
3446       gclog_or_tty->print_cr("[%u] found an empty region "
3447                              "["PTR_FORMAT", "PTR_FORMAT")",
3448                              _worker_id, bottom, limit);
3449     }
3450     // The region was collected underneath our feet.
3451     // We set the finger to bottom to ensure that the bitmap
3452     // iteration that follows will not do anything.
3453     // (This is not a condition that holds when we set the region up,
3454     // as the region is not supposed to be empty in the first place.)
3455     _finger = bottom;
3456   } else if (limit >= _region_limit) {
3457     assert(limit >= _finger, "peace of mind");
3458   } else {
3459     assert(limit < _region_limit, "only way to get here");
3460     // This can happen under some pretty unusual circumstances.  An
3461     // evacuation pause empties the region underneath our feet (NTAMS
3462     // at bottom). We then do some allocation in the region (NTAMS
3463     // stays at bottom), followed by the region being used as a GC
3464     // alloc region (NTAMS will move to top() and the objects
3465     // originally below it will be grayed). All objects now marked in
3466     // the region are explicitly grayed, if below the global finger,
3467     // and in fact we do not need to scan anything else. So, we simply
3468     // set _finger to be limit to ensure that the bitmap iteration
3469     // doesn't do anything.
3470     _finger = limit;
3471   }
3472 
3473   _region_limit = limit;
3474 }
3475 
3476 void CMTask::giveup_current_region() {
3477   assert(_curr_region != NULL, "invariant");
3478   if (_cm->verbose_low()) {
3479     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3480                            _worker_id, _curr_region);
3481   }
3482   clear_region_fields();
3483 }
3484 
3485 void CMTask::clear_region_fields() {
3486   // Set these three fields to values that indicate that we're not
3487   // holding on to a region.
3488   _curr_region   = NULL;
3489   _finger        = NULL;
3490   _region_limit  = NULL;
3491 }
3492 
3493 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3494   if (cm_oop_closure == NULL) {
3495     assert(_cm_oop_closure != NULL, "invariant");
3496   } else {
3497     assert(_cm_oop_closure == NULL, "invariant");
3498   }
3499   _cm_oop_closure = cm_oop_closure;
3500 }
3501 
3502 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3503   guarantee(nextMarkBitMap != NULL, "invariant");
3504 
3505   if (_cm->verbose_low()) {
3506     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3507   }
3508 
3509   _nextMarkBitMap                = nextMarkBitMap;
3510   clear_region_fields();
3511 
3512   _calls                         = 0;
3513   _elapsed_time_ms               = 0.0;
3514   _termination_time_ms           = 0.0;
3515   _termination_start_time_ms     = 0.0;
3516 
3517 #if _MARKING_STATS_
3518   _local_pushes                  = 0;
3519   _local_pops                    = 0;
3520   _local_max_size                = 0;
3521   _objs_scanned                  = 0;
3522   _global_pushes                 = 0;
3523   _global_pops                   = 0;
3524   _global_max_size               = 0;
3525   _global_transfers_to           = 0;
3526   _global_transfers_from         = 0;
3527   _regions_claimed               = 0;
3528   _objs_found_on_bitmap          = 0;
3529   _satb_buffers_processed        = 0;
3530   _steal_attempts                = 0;
3531   _steals                        = 0;
3532   _aborted                       = 0;
3533   _aborted_overflow              = 0;
3534   _aborted_cm_aborted            = 0;
3535   _aborted_yield                 = 0;
3536   _aborted_timed_out             = 0;
3537   _aborted_satb                  = 0;
3538   _aborted_termination           = 0;
3539 #endif // _MARKING_STATS_
3540 }
3541 
3542 bool CMTask::should_exit_termination() {
3543   regular_clock_call();
3544   // This is called when we are in the termination protocol. We should
3545   // quit if, for some reason, this task wants to abort or the global
3546   // stack is not empty (this means that we can get work from it).
3547   return !_cm->mark_stack_empty() || has_aborted();
3548 }
3549 
3550 void CMTask::reached_limit() {
3551   assert(_words_scanned >= _words_scanned_limit ||
3552          _refs_reached >= _refs_reached_limit ,
3553          "shouldn't have been called otherwise");
3554   regular_clock_call();
3555 }
3556 
3557 void CMTask::regular_clock_call() {
3558   if (has_aborted()) return;
3559 
3560   // First, we need to recalculate the words scanned and refs reached
3561   // limits for the next clock call.
3562   recalculate_limits();
3563 
3564   // During the regular clock call we do the following
3565 
3566   // (1) If an overflow has been flagged, then we abort.
3567   if (_cm->has_overflown()) {
3568     set_has_aborted();
3569     return;
3570   }
3571 
3572   // If we are not concurrent (i.e. we're doing remark) we don't need
3573   // to check anything else. The other steps are only needed during
3574   // the concurrent marking phase.
3575   if (!concurrent()) return;
3576 
3577   // (2) If marking has been aborted for Full GC, then we also abort.
3578   if (_cm->has_aborted()) {
3579     set_has_aborted();
3580     statsOnly( ++_aborted_cm_aborted );
3581     return;
3582   }
3583 
3584   double curr_time_ms = os::elapsedVTime() * 1000.0;
3585 
3586   // (3) If marking stats are enabled, then we update the step history.
3587 #if _MARKING_STATS_
3588   if (_words_scanned >= _words_scanned_limit) {
3589     ++_clock_due_to_scanning;
3590   }
3591   if (_refs_reached >= _refs_reached_limit) {
3592     ++_clock_due_to_marking;
3593   }
3594 
3595   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3596   _interval_start_time_ms = curr_time_ms;
3597   _all_clock_intervals_ms.add(last_interval_ms);
3598 
3599   if (_cm->verbose_medium()) {
3600       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3601                         "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3602                         _worker_id, last_interval_ms,
3603                         _words_scanned,
3604                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3605                         _refs_reached,
3606                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3607   }
3608 #endif // _MARKING_STATS_
3609 
3610   // (4) We check whether we should yield. If we have to, then we abort.
3611   if (SuspendibleThreadSet::should_yield()) {
3612     // We should yield. To do this we abort the task. The caller is
3613     // responsible for yielding.
3614     set_has_aborted();
3615     statsOnly( ++_aborted_yield );
3616     return;
3617   }
3618 
3619   // (5) We check whether we've reached our time quota. If we have,
3620   // then we abort.
3621   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3622   if (elapsed_time_ms > _time_target_ms) {
3623     set_has_aborted();
3624     _has_timed_out = true;
3625     statsOnly( ++_aborted_timed_out );
3626     return;
3627   }
3628 
3629   // (6) Finally, we check whether there are enough completed SATB
3630   // buffers available for processing. If there are, we abort.
3631   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3632   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3633     if (_cm->verbose_low()) {
3634       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3635                              _worker_id);
3636     }
3637     // we do need to process SATB buffers, so we'll abort and restart
3638     // the marking task to do so
3639     set_has_aborted();
3640     statsOnly( ++_aborted_satb );
3641     return;
3642   }
3643 }
3644 
3645 void CMTask::recalculate_limits() {
3646   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3647   _words_scanned_limit      = _real_words_scanned_limit;
3648 
3649   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
3650   _refs_reached_limit       = _real_refs_reached_limit;
3651 }
3652 
3653 void CMTask::decrease_limits() {
3654   // This is called when we believe that we're going to do an infrequent
3655   // operation which will increase the per-byte scanned cost (i.e. move
3656   // entries to/from the global stack). It basically tries to decrease the
3657   // scanning limit so that the clock is called earlier.
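       //
       // The adjusted limits below end up one quarter of a period past
       // the point of the last recalculate_limits() call: the real
       // limits sit one full period past it and we subtract three
       // quarters of a period. So the next limit check will invoke the
       // clock much sooner, possibly immediately.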
3658 
3659   if (_cm->verbose_medium()) {
3660     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3661   }
3662 
3663   _words_scanned_limit = _real_words_scanned_limit -
3664     3 * words_scanned_period / 4;
3665   _refs_reached_limit  = _real_refs_reached_limit -
3666     3 * refs_reached_period / 4;
3667 }
3668 
3669 void CMTask::move_entries_to_global_stack() {
3670   // local array where we'll store the entries that will be popped
3671   // from the local queue
3672   oop buffer[global_stack_transfer_size];
3673 
3674   int n = 0;
3675   oop obj;
3676   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3677     buffer[n] = obj;
3678     ++n;
3679   }
3680 
3681   if (n > 0) {
3682     // we popped at least one entry from the local queue
3683 
3684     statsOnly( ++_global_transfers_to; _local_pops += n );
3685 
3686     if (!_cm->mark_stack_push(buffer, n)) {
3687       if (_cm->verbose_low()) {
3688         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3689                                _worker_id);
3690       }
3691       set_has_aborted();
3692     } else {
3693       // the transfer was successful
3694 
3695       if (_cm->verbose_medium()) {
3696         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3697                                _worker_id, n);
3698       }
3699       statsOnly( int tmp_size = _cm->mark_stack_size();
3700                  if (tmp_size > _global_max_size) {
3701                    _global_max_size = tmp_size;
3702                  }
3703                  _global_pushes += n );
3704     }
3705   }
3706 
3707   // this operation was quite expensive, so decrease the limits
3708   decrease_limits();
3709 }
3710 
3711 void CMTask::get_entries_from_global_stack() {
3712   // local array where we'll store the entries that will be popped
3713   // from the global stack.
3714   oop buffer[global_stack_transfer_size];
3715   int n;
3716   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3717   assert(n <= global_stack_transfer_size,
3718          "we should not pop more than the given limit");
3719   if (n > 0) {
3720     // yes, we did actually pop at least one entry
3721 
3722     statsOnly( ++_global_transfers_from; _global_pops += n );
3723     if (_cm->verbose_medium()) {
3724       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3725                              _worker_id, n);
3726     }
3727     for (int i = 0; i < n; ++i) {
3728       bool success = _task_queue->push(buffer[i]);
3729       // We only call this when the local queue is empty or under a
3730       // given target limit. So, we do not expect this push to fail.
3731       assert(success, "invariant");
3732     }
3733 
3734     statsOnly( int tmp_size = _task_queue->size();
3735                if (tmp_size > _local_max_size) {
3736                  _local_max_size = tmp_size;
3737                }
3738                _local_pushes += n );
3739   }
3740 
3741   // this operation was quite expensive, so decrease the limits
3742   decrease_limits();
3743 }
3744 
3745 void CMTask::drain_local_queue(bool partially) {
3746   if (has_aborted()) return;
3747 
3748   // Decide what the target size is, depending on whether we're going to
3749   // drain it partially (so that other tasks can steal if they run out
3750   // of things to do) or totally (at the very end).
3751   size_t target_size;
3752   if (partially) {
3753     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3754   } else {
3755     target_size = 0;
3756   }
3757 
3758   if (_task_queue->size() > target_size) {
3759     if (_cm->verbose_high()) {
3760       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3761                              _worker_id, target_size);
3762     }
3763 
3764     oop obj;
3765     bool ret = _task_queue->pop_local(obj);
3766     while (ret) {
3767       statsOnly( ++_local_pops );
3768 
3769       if (_cm->verbose_high()) {
3770         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3771                                (void*) obj);
3772       }
3773 
3774       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3775       assert(!_g1h->is_on_master_free_list(
3776                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3777 
3778       scan_object(obj);
3779 
3780       if (_task_queue->size() <= target_size || has_aborted()) {
3781         ret = false;
3782       } else {
3783         ret = _task_queue->pop_local(obj);
3784       }
3785     }
3786 
3787     if (_cm->verbose_high()) {
3788       gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3789                              _worker_id, _task_queue->size());
3790     }
3791   }
3792 }
3793 
3794 void CMTask::drain_global_stack(bool partially) {
3795   if (has_aborted()) return;
3796 
3797   // We have a policy to drain the local queue before we attempt to
3798   // drain the global stack.
3799   assert(partially || _task_queue->size() == 0, "invariant");
3800 
3801   // Decide what the target size is, depending on whether we're going to
3802   // drain it partially (so that other tasks can steal if they run out
3803   // of things to do) or totally (at the very end).  Notice that,
3804   // because we move entries from the global stack in chunks or
3805   // because another task might be doing the same, we might in fact
3806   // drop below the target. But, this is not a problem.
3807   size_t target_size;
3808   if (partially) {
3809     target_size = _cm->partial_mark_stack_size_target();
3810   } else {
3811     target_size = 0;
3812   }
3813 
3814   if (_cm->mark_stack_size() > target_size) {
3815     if (_cm->verbose_low()) {
3816       gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3817                              _worker_id, target_size);
3818     }
3819 
3820     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3821       get_entries_from_global_stack();
3822       drain_local_queue(partially);
3823     }
3824 
3825     if (_cm->verbose_low()) {
3826       gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3827                              _worker_id, _cm->mark_stack_size());
3828     }
3829   }
3830 }
3831 
3832 // The SATB queue set makes several assumptions about whether to call
3833 // the par or non-par versions of its methods. This is why some of the
3834 // code is replicated. We should really get rid of the single-threaded
3835 // version of the code to simplify things.
3836 void CMTask::drain_satb_buffers() {
3837   if (has_aborted()) return;
3838 
3839   // We set this so that the regular clock knows that we're in the
3840   // middle of draining buffers and doesn't set the abort flag when it
3841   // notices that SATB buffers are available for draining. It'd be
3842   // very counterproductive if it did that. :-)
3843   _draining_satb_buffers = true;
3844 
3845   CMObjectClosure oc(this);
3846   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3847   if (G1CollectedHeap::use_parallel_gc_threads()) {
3848     satb_mq_set.set_par_closure(_worker_id, &oc);
3849   } else {
3850     satb_mq_set.set_closure(&oc);
3851   }
3852 
3853   // This keeps claiming and applying the closure to completed buffers
3854   // until we run out of buffers or we need to abort.
3855   if (G1CollectedHeap::use_parallel_gc_threads()) {
3856     while (!has_aborted() &&
3857            satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
3858       if (_cm->verbose_medium()) {
3859         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3860       }
3861       statsOnly( ++_satb_buffers_processed );
3862       regular_clock_call();
3863     }
3864   } else {
3865     while (!has_aborted() &&
3866            satb_mq_set.apply_closure_to_completed_buffer()) {
3867       if (_cm->verbose_medium()) {
3868         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3869       }
3870       statsOnly( ++_satb_buffers_processed );
3871       regular_clock_call();
3872     }
3873   }
3874 
3875   if (!concurrent() && !has_aborted()) {
3876     // We should only do this during remark.
3877     if (G1CollectedHeap::use_parallel_gc_threads()) {
3878       satb_mq_set.par_iterate_closure_all_threads(_worker_id);
3879     } else {
3880       satb_mq_set.iterate_closure_all_threads();
3881     }
3882   }
3883 
3884   _draining_satb_buffers = false;
3885 
3886   assert(has_aborted() ||
3887          concurrent() ||
3888          satb_mq_set.completed_buffers_num() == 0, "invariant");
3889 
3890   if (G1CollectedHeap::use_parallel_gc_threads()) {
3891     satb_mq_set.set_par_closure(_worker_id, NULL);
3892   } else {
3893     satb_mq_set.set_closure(NULL);
3894   }
3895 
3896   // again, this was a potentially expensive operation, so decrease
3897   // the limits to get the regular clock call early
3898   decrease_limits();
3899 }
3900 
3901 void CMTask::print_stats() {
3902   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3903                          _worker_id, _calls);
3904   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3905                          _elapsed_time_ms, _termination_time_ms);
3906   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3907                          _step_times_ms.num(), _step_times_ms.avg(),
3908                          _step_times_ms.sd());
3909   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3910                          _step_times_ms.maximum(), _step_times_ms.sum());
3911 
3912 #if _MARKING_STATS_
3913   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3914                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3915                          _all_clock_intervals_ms.sd());
3916   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
3917                          _all_clock_intervals_ms.maximum(),
3918                          _all_clock_intervals_ms.sum());
3919   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = %d, marking = %d",
3920                          _clock_due_to_scanning, _clock_due_to_marking);
3921   gclog_or_tty->print_cr("  Objects: scanned = %d, found on the bitmap = %d",
3922                          _objs_scanned, _objs_found_on_bitmap);
3923   gclog_or_tty->print_cr("  Local Queue:  pushes = %d, pops = %d, max size = %d",
3924                          _local_pushes, _local_pops, _local_max_size);
3925   gclog_or_tty->print_cr("  Global Stack: pushes = %d, pops = %d, max size = %d",
3926                          _global_pushes, _global_pops, _global_max_size);
3927   gclog_or_tty->print_cr("                transfers to = %d, transfers from = %d",
3928                          _global_transfers_to,_global_transfers_from);
3929   gclog_or_tty->print_cr("  Regions: claimed = %d", _regions_claimed);
3930   gclog_or_tty->print_cr("  SATB buffers: processed = %d", _satb_buffers_processed);
3931   gclog_or_tty->print_cr("  Steals: attempts = %d, successes = %d",
3932                          _steal_attempts, _steals);
3933   gclog_or_tty->print_cr("  Aborted: %d, due to", _aborted);
3934   gclog_or_tty->print_cr("    overflow: %d, global abort: %d, yield: %d",
3935                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3936   gclog_or_tty->print_cr("    time out: %d, SATB: %d, termination: %d",
3937                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3938 #endif // _MARKING_STATS_
3939 }
3940 
3941 /*****************************************************************************
3942 
3943     The do_marking_step(time_target_ms, ...) method is the building
3944     block of the parallel marking framework. It can be called in parallel
3945     with other invocations of do_marking_step() on different tasks
3946     (but only one per task, obviously) and concurrently with the
3947     mutator threads, or during remark, hence it eliminates the need
3948     for two versions of the code. When called during remark, it will
3949     pick up from where the task left off during the concurrent marking
3950     phase. Interestingly, tasks are also claimable during evacuation
3951     pauses, since do_marking_step() ensures that it aborts before
3952     it needs to yield.
3953 
3954     The data structures that it uses to do marking work are the
3955     following:
3956 
3957       (1) Marking Bitmap. If there are gray objects that appear only
3958       on the bitmap (this happens either when dealing with an overflow
3959       or when the initial marking phase has simply marked the roots
3960       and didn't push them on the stack), then tasks claim heap
3961       regions whose bitmap they then scan to find gray objects. A
3962       global finger indicates where the end of the last claimed region
3963       is. A local finger indicates how far into the region a task has
3964       scanned. The two fingers are used to determine how to gray an
3965       object (i.e. whether simply marking it is OK, as it will be
3966       visited by a task in the future, or whether it also needs to be
3967       pushed on a stack).
3968 
3969       (2) Local Queue. The task's local queue, which the task can
3970       access reasonably efficiently. Other tasks can steal from
3971       it when they run out of work. Throughout the marking phase, a
3972       task attempts to keep its local queue short but not totally
3973       empty, so that entries are available for stealing by other
3974       tasks. Only when there is no more work will a task totally
3975       drain its local queue.
3976 
3977       (3) Global Mark Stack. This handles local queue overflow. During
3978       marking only sets of entries are moved between it and the local
3979       queues, as access to it requires a mutex and more fine-grained
3980       interaction with it might cause contention. If it
3981       overflows, then the marking phase should restart and iterate
3982       over the bitmap to identify gray objects. Throughout the marking
3983       phase, tasks attempt to keep the global mark stack at a small
3984       length but not totally empty, so that entries are available for
3985       popping by other tasks. Only when there is no more work will
3986       tasks totally drain the global mark stack.
3987 
3988       (4) SATB Buffer Queue. This is where completed SATB buffers are
3989       made available. Buffers are regularly removed from this queue
3990       and scanned for roots, so that the queue doesn't get too
3991       long. During remark, all completed buffers are processed, as
3992       well as the filled-in parts of any uncompleted buffers.
3993 
3994     The do_marking_step() method tries to abort when the time target
3995     has been reached. There are a few other cases when the
3996     do_marking_step() method also aborts:
3997 
3998       (1) When the marking phase has been aborted (after a Full GC).
3999 
4000       (2) When a global overflow (on the global stack) has been
4001       triggered. Before the task aborts, it will actually sync up with
4002       the other tasks to ensure that all the marking data structures
4003       (local queues, stacks, fingers etc.)  are re-initialized so that
4004       when do_marking_step() completes, the marking phase can
4005       immediately restart.
4006 
4007       (3) When enough completed SATB buffers are available. The
4008       do_marking_step() method only tries to drain SATB buffers right
4009       at the beginning. So, if enough buffers are available, the
4010       marking step aborts and the SATB buffers are processed at
4011       the beginning of the next invocation.
4012 
4013       (4) To yield. When we have to yield, we abort and yield
4014       right at the end of do_marking_step(). This saves us from a lot
4015       of hassle as, by yielding, we might allow a Full GC. If this
4016       happens then objects will be compacted underneath our feet, the
4017       heap might shrink, etc. We save checking for this by just
4018       aborting and doing the yield right at the end.
4019 
4020     From the above it follows that the do_marking_step() method should
4021     be called in a loop (or, otherwise, regularly) until it completes.
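
     For illustration only, here is a minimal caller sketch (the task
     pointer and the 10ms time slice are assumptions, not taken from
     the surrounding code):

       CMTask* task = ...;   // a claimed, initialized marking task
       do {
         task->do_marking_step(10.0,    // time_target_ms
                               true,    // do_termination
                               false);  // is_serial
         // yield to a pending safepoint or handle overflow here
       } while (task->has_aborted());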
4022 
4023     If a marking step completes without its has_aborted() flag being
4024     true, it means it has completed the current marking phase (and
4025     also all other marking tasks have done so and have all synced up).
4026 
4027     A method called regular_clock_call() is invoked "regularly" (in
4028     sub-ms intervals) throughout marking. It is this clock method that
4029     checks all the abort conditions which were mentioned above and
4030     decides when the task should abort. A work-based scheme is used to
4031     trigger this clock method: when the number of object words the
4032     marking phase has scanned or the number of references the marking
4033     phase has visited reaches a given limit. Additional invocations of
4034     the clock method have been planted in a few other strategic places
4035     too. The initial reason for the clock method was to avoid calling
4036     vtime too regularly, as it is quite expensive. So, once it was in
4037     place, it was natural to piggy-back all the other conditions on it
4038     too and not constantly check them throughout the code.
4039 
4040     If do_termination is true then do_marking_step will enter its
4041     termination protocol.
4042 
4043     The value of is_serial must be true when do_marking_step is being
4044     called serially (i.e. by the VMThread) and do_marking_step should
4045     skip any synchronization in the termination and overflow code.
4046     Examples include the serial remark code and the serial reference
4047     processing closures.
4048 
4049     The value of is_serial must be false when do_marking_step is
4050     being called by any of the worker threads in a work gang.
4051     Examples include the concurrent marking code (CMMarkingTask),
4052     the MT remark code, and the MT reference processing closures.
4053 
4054  *****************************************************************************/
4055 
4056 void CMTask::do_marking_step(double time_target_ms,
4057                              bool do_termination,
4058                              bool is_serial) {
4059   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4060   assert(concurrent() == _cm->concurrent(), "they should be the same");
4061 
4062   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4063   assert(_task_queues != NULL, "invariant");
4064   assert(_task_queue != NULL, "invariant");
4065   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4066 
4067   assert(!_claimed,
4068          "only one thread should claim this task at any one time");
4069 
4070   // OK, this doesn't safeguard against all possible scenarios, as it is
4071   // possible for two threads to set the _claimed flag at the same
4072   // time. But it is only for debugging purposes anyway and it will
4073   // catch most problems.
4074   _claimed = true;
4075 
4076   _start_time_ms = os::elapsedVTime() * 1000.0;
4077   statsOnly( _interval_start_time_ms = _start_time_ms );
4078 
4079   // If do_stealing is true then do_marking_step will attempt to
4080   // steal work from the other CMTasks. It only makes sense to
4081   // enable stealing when the termination protocol is enabled
4082   // and do_marking_step() is not being called serially.
4083   bool do_stealing = do_termination && !is_serial;
4084 
4085   double diff_prediction_ms =
4086     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4087   _time_target_ms = time_target_ms - diff_prediction_ms;
4088 
4089   // set up the variables that are used in the work-based scheme to
4090   // call the regular clock method
4091   _words_scanned = 0;
4092   _refs_reached  = 0;
4093   recalculate_limits();
4094 
4095   // clear all flags
4096   clear_has_aborted();
4097   _has_timed_out = false;
4098   _draining_satb_buffers = false;
4099 
4100   ++_calls;
4101 
4102   if (_cm->verbose_low()) {
4103     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4104                            "target = %1.2lfms >>>>>>>>>>",
4105                            _worker_id, _calls, _time_target_ms);
4106   }
4107 
4108   // Set up the bitmap and oop closures. Anything that uses them is
4109   // eventually called from this method, so it is OK to allocate these
4110   // on the stack.
4111   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4112   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
4113   set_cm_oop_closure(&cm_oop_closure);
4114 
4115   if (_cm->has_overflown()) {
4116     // This can happen if the mark stack overflows during a GC pause
4117     // and this task, after a yield point, restarts. We have to abort
4118     // as we need to get into the overflow protocol which happens
4119     // right at the end of this task.
4120     set_has_aborted();
4121   }
4122 
4123   // First drain any available SATB buffers. After this, we will not
4124   // look at SATB buffers before the next invocation of this method.
4125   // If enough completed SATB buffers are queued up, the regular clock
4126   // will abort this task so that it restarts.
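       // (SATB buffers hold the old reference values recorded by the
       // pre-write barrier under the snapshot-at-the-beginning
       // invariant; draining them pushes those objects through the
       // marking closures.)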
4127   drain_satb_buffers();
4128   // ...then partially drain the local queue and the global stack
4129   drain_local_queue(true);
4130   drain_global_stack(true);
4131 
4132   do {
4133     if (!has_aborted() && _curr_region != NULL) {
4134       // This means that we're already holding on to a region.
4135       assert(_finger != NULL, "if region is not NULL, then the finger "
4136              "should not be NULL either");
4137 
4138       // We might have restarted this task after an evacuation pause
4139       // which might have evacuated the region we're holding on to
4140       // underneath our feet. Let's read its limit again to make sure
4141       // that we do not iterate over a region of the heap that
4142       // contains garbage (update_region_limit() will also move
4143       // _finger to the start of the region if it is found empty).
4144       update_region_limit();
4145       // We will start from _finger not from the start of the region,
4146       // as we might be restarting this task after aborting half-way
4147       // through scanning this region. In this case, _finger points to
4148       // the address where we last found a marked object. If this is a
4149       // fresh region, _finger points to start().
4150       MemRegion mr = MemRegion(_finger, _region_limit);
4151 
4152       if (_cm->verbose_low()) {
4153         gclog_or_tty->print_cr("[%u] we're scanning part "
4154                                "["PTR_FORMAT", "PTR_FORMAT") "
4155                                "of region "HR_FORMAT,
4156                                _worker_id, _finger, _region_limit,
4157                                HR_FORMAT_PARAMS(_curr_region));
4158       }
4159 
4160       assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
4161              "humongous regions should go around loop once only");
4162 
4163       // Some special cases:
4164       // If the memory region is empty, we can just give up the region.
4165       // If the current region is humongous then we only need to check
4166       // the bitmap for the bit associated with the start of the object,
4167       // scan the object if it's live, and give up the region.
4168       // Otherwise, let's iterate over the bitmap of the part of the region
4169       // that is left.
4170       // If the iteration is successful, give up the region.
4171       if (mr.is_empty()) {
4172         giveup_current_region();
4173         regular_clock_call();
4174       } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
4175         if (_nextMarkBitMap->isMarked(mr.start())) {
4176           // The object is marked - apply the closure
4177           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4178           bitmap_closure.do_bit(offset);
4179         }
4180         // Even if this task aborted while scanning the humongous object
4181         // we can (and should) give up the current region.
4182         giveup_current_region();
4183         regular_clock_call();
4184       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4185         giveup_current_region();
4186         regular_clock_call();
4187       } else {
4188         assert(has_aborted(), "currently the only way to do so");
4189         // The only way to abort the bitmap iteration is to return
4190         // false from the do_bit() method. However, inside the
4191         // do_bit() method we move the _finger to point to the
4192         // object currently being looked at. So, if we bail out, we
4193         // have definitely set _finger to something non-null.
4194         assert(_finger != NULL, "invariant");
4195 
4196         // Region iteration was actually aborted. So now _finger
4197         // points to the address of the object we last scanned. If we
4198         // leave it there, when we restart this task, we will rescan
4199         // the object. It is easy to avoid this. We move the finger by
4200         // enough to point to the next possible object header (the
4201         // bitmap knows by how much we need to move it as it knows its
4202         // granularity).
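             // (Illustrative: with the usual one-bit-per-heap-word mark
             // bitmap, i.e. _shifter == 0, nextObject() advances the
             // finger by a single heap word; in general it advances by
             // 2^_shifter words.)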
4203         assert(_finger < _region_limit, "invariant");
4204         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
4205         // Check if bitmap iteration was aborted while scanning the last object
4206         if (new_finger >= _region_limit) {
4207           giveup_current_region();
4208         } else {
4209           move_finger_to(new_finger);
4210         }
4211       }
4212     }
4213     // At this point we have either completed iterating over the
4214     // region we were holding on to, or we have aborted.
4215 
4216     // We then partially drain the local queue and the global stack.
4217     // (Do we really need this?)
4218     drain_local_queue(true);
4219     drain_global_stack(true);
4220 
4221     // Read the note on the claim_region() method about why it might
4222     // return NULL with potentially more regions available for
4223     // claiming, and why we have to check out_of_regions() to determine
4224     // whether we're done or not.
4225     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4226       // We are going to try to claim a new region. We should have
4227       // given up on the previous one.
4228       // Separated the asserts so that we know which one fires.
4229       assert(_curr_region  == NULL, "invariant");
4230       assert(_finger       == NULL, "invariant");
4231       assert(_region_limit == NULL, "invariant");
4232       if (_cm->verbose_low()) {
4233         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4234       }
4235       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4236       if (claimed_region != NULL) {
4237         // Yes, we managed to claim one
4238         statsOnly( ++_regions_claimed );
4239 
4240         if (_cm->verbose_low()) {
4241           gclog_or_tty->print_cr("[%u] we successfully claimed "
4242                                  "region "PTR_FORMAT,
4243                                  _worker_id, claimed_region);
4244         }
4245 
4246         setup_for_region(claimed_region);
4247         assert(_curr_region == claimed_region, "invariant");
4248       }
4249       // It is important to call the regular clock here. It might take
4250       // a while to claim a region if, for example, we hit a large
4251       // block of empty regions. So we need to call the regular clock
4252       // method once round the loop to make sure it's called
4253       // frequently enough.
4254       regular_clock_call();
4255     }
4256 
4257     if (!has_aborted() && _curr_region == NULL) {
4258       assert(_cm->out_of_regions(),
4259              "at this point we should be out of regions");
4260     }
4261   } while ( _curr_region != NULL && !has_aborted());
4262 
4263   if (!has_aborted()) {
4264     // We cannot check whether the global stack is empty, since other
4265     // tasks might be pushing objects to it concurrently.
4266     assert(_cm->out_of_regions(),
4267            "at this point we should be out of regions");
4268 
4269     if (_cm->verbose_low()) {
4270       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4271     }
4272 
4273     // Try to reduce the number of available SATB buffers so that
4274     // remark has less work to do.
4275     drain_satb_buffers();
4276   }
4277 
4278   // Since we've done everything else, we can now totally drain the
4279   // local queue and global stack.
4280   drain_local_queue(false);
4281   drain_global_stack(false);
4282 
4283   // Attempt to steal work from other tasks' queues.
4284   if (do_stealing && !has_aborted()) {
4285     // We have not aborted. This means that we have finished all that
4286     // we could. Let's try to do some stealing...
4287 
4288     // We cannot check whether the global stack is empty, since other
4289     // tasks might be pushing objects to it concurrently.
4290     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4291            "only way to reach here");
4292 
4293     if (_cm->verbose_low()) {
4294       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4295     }
4296 
4297     while (!has_aborted()) {
4298       oop obj;
4299       statsOnly( ++_steal_attempts );
4300 
4301       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4302         if (_cm->verbose_medium()) {
4303           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4304                                  _worker_id, (void*) obj);
4305         }
4306 
4307         statsOnly( ++_steals );
4308 
4309         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4310                "any stolen object should be marked");
4311         scan_object(obj);
4312 
4313         // And since we're towards the end, let's totally drain the
4314         // local queue and global stack.
4315         drain_local_queue(false);
4316         drain_global_stack(false);
4317       } else {
4318         break;
4319       }
4320     }
4321   }
4322 
4323   // If we are about to wrap up and go into termination, check if we
4324   // should raise the overflow flag.
4325   if (do_termination && !has_aborted()) {
4326     if (_cm->force_overflow()->should_force()) {
4327       _cm->set_has_overflown();
4328       regular_clock_call();
4329     }
4330   }
4331 
4332   // We still haven't aborted. Now, let's try to get into the
4333   // termination protocol.
4334   if (do_termination && !has_aborted()) {
4335     // We cannot check whether the global stack is empty, since other
4336     // tasks might be concurrently pushing objects on it.
4337     // Separated the asserts so that we know which one fires.
4338     assert(_cm->out_of_regions(), "only way to reach here");
4339     assert(_task_queue->size() == 0, "only way to reach here");
4340 
4341     if (_cm->verbose_low()) {
4342       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4343     }
4344 
4345     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4346 
4347     // The CMTask class also extends the TerminatorTerminator class,
4348     // so its should_exit_termination() method will also decide
4349     // whether to exit the termination protocol.
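         // (In the parallel case, offer_termination() is expected to
         // return true only once every worker in the gang has offered
         // termination; should_exit_termination() lets this task bail
         // out of the protocol and resume work instead.)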
4350     bool finished = (is_serial ||
4351                      _cm->terminator()->offer_termination(this));
4352     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4353     _termination_time_ms +=
4354       termination_end_time_ms - _termination_start_time_ms;
4355 
4356     if (finished) {
4357       // We're all done.
4358 
4359       if (_worker_id == 0) {
4360         // let's allow task 0 to do this
4361         if (concurrent()) {
4362           assert(_cm->concurrent_marking_in_progress(), "invariant");
4363           // we need to set this to false before the next
4364           // safepoint. This way we ensure that the marking phase
4365           // doesn't observe any more heap expansions.
4366           _cm->clear_concurrent_marking_in_progress();
4367         }
4368       }
4369 
4370       // We can now guarantee that the global stack is empty, since
4371       // all other tasks have finished. We separated the guarantees so
4372       // that, if a condition is false, we can immediately find out
4373       // which one.
4374       guarantee(_cm->out_of_regions(), "only way to reach here");
4375       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4376       guarantee(_task_queue->size() == 0, "only way to reach here");
4377       guarantee(!_cm->has_overflown(), "only way to reach here");
4378       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4379 
4380       if (_cm->verbose_low()) {
4381         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4382       }
4383     } else {
4384       // Apparently there's more work to do. Let's abort this task;
4385       // the caller will restart it and we can hopefully find more to do.
4386 
4387       if (_cm->verbose_low()) {
4388         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4389                                _worker_id);
4390       }
4391 
4392       set_has_aborted();
4393       statsOnly( ++_aborted_termination );
4394     }
4395   }
4396 
4397   // Mainly for debugging purposes to make sure that a pointer to the
4398   // closure which was stack-allocated in this frame doesn't
4399   // escape it by accident.
4400   set_cm_oop_closure(NULL);
4401   double end_time_ms = os::elapsedVTime() * 1000.0;
4402   double elapsed_time_ms = end_time_ms - _start_time_ms;
4403   // Update the step history.
4404   _step_times_ms.add(elapsed_time_ms);
4405 
4406   if (has_aborted()) {
4407     // The task was aborted for some reason.
4408 
4409     statsOnly( ++_aborted );
4410 
4411     if (_has_timed_out) {
4412       double diff_ms = elapsed_time_ms - _time_target_ms;
4413       // Keep statistics of how well we did with respect to hitting
4414       // our target only if we actually timed out (if we aborted for
4415       // other reasons, then the results might get skewed).
4416       _marking_step_diffs_ms.add(diff_ms);
4417     }
4418 
4419     if (_cm->has_overflown()) {
4420       // This is the interesting one. We aborted because a global
4421       // overflow was raised. This means we have to restart the
4422       // marking phase and start iterating over regions. However, in
4423       // order to do this we have to make sure that all tasks stop
4424       // what they are doing and re-initialize in a safe manner. We
4425       // will achieve this with the use of two barrier sync points.
4426 
4427       if (_cm->verbose_low()) {
4428         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4429       }
4430 
4431       if (!is_serial) {
4432         // We only need to enter the sync barrier if being called
4433         // from a parallel context
4434         _cm->enter_first_sync_barrier(_worker_id);
4435 
4436         // When we exit this sync barrier we know that all tasks have
4437         // stopped doing marking work. So, it's now safe to
4438         // re-initialize our data structures. At the end of this method,
4439         // task 0 will clear the global data structures.
4440       }
4441 
4442       statsOnly( ++_aborted_overflow );
4443 
4444       // We clear the local state of this task...
4445       clear_region_fields();
4446 
4447       if (!is_serial) {
4448         // ...and enter the second barrier.
4449         _cm->enter_second_sync_barrier(_worker_id);
4450       }
4451       // At this point, if we're in the concurrent phase of
4452       // marking, everything has been re-initialized and we're
4453       // ready to restart.
4454     }
4455 
4456     if (_cm->verbose_low()) {
4457       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4458                              "elapsed = %1.2lfms <<<<<<<<<<",
4459                              _worker_id, _time_target_ms, elapsed_time_ms);
4460       if (_cm->has_aborted()) {
4461         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4462                                _worker_id);
4463       }
4464     }
4465   } else {
4466     if (_cm->verbose_low()) {
4467       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4468                              "elapsed = %1.2lfms <<<<<<<<<<",
4469                              _worker_id, _time_target_ms, elapsed_time_ms);
4470     }
4471   }
4472 
4473   _claimed = false;
4474 }
4475 
4476 CMTask::CMTask(uint worker_id,
4477                ConcurrentMark* cm,
4478                size_t* marked_bytes,
4479                BitMap* card_bm,
4480                CMTaskQueue* task_queue,
4481                CMTaskQueueSet* task_queues)
4482   : _g1h(G1CollectedHeap::heap()),
4483     _worker_id(worker_id), _cm(cm),
4484     _claimed(false),
4485     _nextMarkBitMap(NULL), _hash_seed(17),
4486     _task_queue(task_queue),
4487     _task_queues(task_queues),
4488     _cm_oop_closure(NULL),
4489     _marked_bytes_array(marked_bytes),
4490     _card_bm(card_bm) {
4491   guarantee(task_queue != NULL, "invariant");
4492   guarantee(task_queues != NULL, "invariant");
4493 
4494   statsOnly( _clock_due_to_scanning = 0;
4495              _clock_due_to_marking  = 0 );
4496 
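       // Seed the step-diff predictor with a small value so that the
       // very first prediction is non-zero (the 0.5ms constant is
       // presumably just a conservative initial guess).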
4497   _marking_step_diffs_ms.add(0.5);
4498 }
4499 
4500 // These are formatting macros that are used below to ensure
4501 // consistent formatting. The *_H_* versions are used to format the
4502 // header for a particular value and they should be kept consistent
4503 // with the corresponding macro. Also note that most of the macros add
4504 // the necessary white space (as a prefix) which makes them a bit
4505 // easier to compose.
4506 
4507 // All the output lines are prefixed with this string to be able to
4508 // identify them easily in a large log file.
4509 #define G1PPRL_LINE_PREFIX            "###"
4510 
4511 #define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
4512 #ifdef _LP64
4513 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
4514 #else // _LP64
4515 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
4516 #endif // _LP64
4517 
4518 // For per-region info
4519 #define G1PPRL_TYPE_FORMAT            "   %-4s"
4520 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
4521 #define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
4522 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
4523 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
4524 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
4525 
4526 // For summary info
4527 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
4528 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
4529 #define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
4530 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
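     // For illustration only, a per-region line composed from these
     // macros comes out roughly as follows (the address range and the
     // sizes are made-up values):
     //
     // ###    OLD 0x00000000f0000000-0x00000000f0200000    1048576 ...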
4531 
4532 G1PrintRegionLivenessInfoClosure::
4533 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4534   : _out(out),
4535     _total_used_bytes(0), _total_capacity_bytes(0),
4536     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4537     _hum_used_bytes(0), _hum_capacity_bytes(0),
4538     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4539     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4540   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4541   MemRegion g1_committed = g1h->g1_committed();
4542   MemRegion g1_reserved = g1h->g1_reserved();
4543   double now = os::elapsedTime();
4544 
4545   // Print the header of the output.
4546   _out->cr();
4547   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4548   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4549                  G1PPRL_SUM_ADDR_FORMAT("committed")
4550                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4551                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4552                  g1_committed.start(), g1_committed.end(),
4553                  g1_reserved.start(), g1_reserved.end(),
4554                  HeapRegion::GrainBytes);
4555   _out->print_cr(G1PPRL_LINE_PREFIX);
4556   _out->print_cr(G1PPRL_LINE_PREFIX
4557                 G1PPRL_TYPE_H_FORMAT
4558                 G1PPRL_ADDR_BASE_H_FORMAT
4559                 G1PPRL_BYTE_H_FORMAT
4560                 G1PPRL_BYTE_H_FORMAT
4561                 G1PPRL_BYTE_H_FORMAT
4562                 G1PPRL_DOUBLE_H_FORMAT
4563                 G1PPRL_BYTE_H_FORMAT
4564                 G1PPRL_BYTE_H_FORMAT,
4565                 "type", "address-range",
4566                 "used", "prev-live", "next-live", "gc-eff",
4567                 "remset", "code-roots");
4568   _out->print_cr(G1PPRL_LINE_PREFIX
4569                 G1PPRL_TYPE_H_FORMAT
4570                 G1PPRL_ADDR_BASE_H_FORMAT
4571                 G1PPRL_BYTE_H_FORMAT
4572                 G1PPRL_BYTE_H_FORMAT
4573                 G1PPRL_BYTE_H_FORMAT
4574                 G1PPRL_DOUBLE_H_FORMAT
4575                 G1PPRL_BYTE_H_FORMAT
4576                 G1PPRL_BYTE_H_FORMAT,
4577                 "", "",
4578                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4579                 "(bytes)", "(bytes)");
4580 }
4581 
4582 // It takes as a parameter a reference to one of the _hum_* fields,
4583 // deduces the corresponding value for a region in a humongous region
4584 // series (either the region size, or what's left if the _hum_* field
4585 // is < the region size), and updates the _hum_* field accordingly.
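     // Worked example (illustrative numbers): if HeapRegion::GrainBytes
     // is 1M and *hum_bytes starts at 2.5M, three successive calls
     // return 1M, 1M and 0.5M, leaving *hum_bytes at 0; any further
     // calls return 0.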
4586 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4587   size_t bytes = 0;
4588   // The > 0 check is to deal with the prev and next live bytes which
4589   // could be 0.
4590   if (*hum_bytes > 0) {
4591     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4592     *hum_bytes -= bytes;
4593   }
4594   return bytes;
4595 }
4596 
4597 // It deduces the values for a region in a humongous region series
4598 // from the _hum_* fields and updates those accordingly. It assumes
4599 // that the _hum_* fields have already been set up from the "starts
4600 // humongous" region and that we visit the regions in address order.
4601 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4602                                                      size_t* capacity_bytes,
4603                                                      size_t* prev_live_bytes,
4604                                                      size_t* next_live_bytes) {
4605   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4606   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
4607   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
4608   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4609   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4610 }
4611 
4612 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4613   const char* type = "";
4614   HeapWord* bottom       = r->bottom();
4615   HeapWord* end          = r->end();
4616   size_t capacity_bytes  = r->capacity();
4617   size_t used_bytes      = r->used();
4618   size_t prev_live_bytes = r->live_bytes();
4619   size_t next_live_bytes = r->next_live_bytes();
4620   double gc_eff          = r->gc_efficiency();
4621   size_t remset_bytes    = r->rem_set()->mem_size();
4622   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4623 
4624   if (r->used() == 0) {
4625     type = "FREE";
4626   } else if (r->is_survivor()) {
4627     type = "SURV";
4628   } else if (r->is_young()) {
4629     type = "EDEN";
4630   } else if (r->startsHumongous()) {
4631     type = "HUMS";
4632 
4633     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4634            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4635            "they should have been zeroed after the last time we used them");
4636     // Set up the _hum_* fields.
4637     _hum_capacity_bytes  = capacity_bytes;
4638     _hum_used_bytes      = used_bytes;
4639     _hum_prev_live_bytes = prev_live_bytes;
4640     _hum_next_live_bytes = next_live_bytes;
4641     get_hum_bytes(&used_bytes, &capacity_bytes,
4642                   &prev_live_bytes, &next_live_bytes);
4643     end = bottom + HeapRegion::GrainWords;
4644   } else if (r->continuesHumongous()) {
4645     type = "HUMC";
4646     get_hum_bytes(&used_bytes, &capacity_bytes,
4647                   &prev_live_bytes, &next_live_bytes);
4648     assert(end == bottom + HeapRegion::GrainWords, "invariant");
4649   } else {
4650     type = "OLD";
4651   }
4652 
4653   _total_used_bytes      += used_bytes;
4654   _total_capacity_bytes  += capacity_bytes;
4655   _total_prev_live_bytes += prev_live_bytes;
4656   _total_next_live_bytes += next_live_bytes;
4657   _total_remset_bytes    += remset_bytes;
4658   _total_strong_code_roots_bytes += strong_code_roots_bytes;
4659 
4660   // Print a line for this particular region.
4661   _out->print_cr(G1PPRL_LINE_PREFIX
4662                  G1PPRL_TYPE_FORMAT
4663                  G1PPRL_ADDR_BASE_FORMAT
4664                  G1PPRL_BYTE_FORMAT
4665                  G1PPRL_BYTE_FORMAT
4666                  G1PPRL_BYTE_FORMAT
4667                  G1PPRL_DOUBLE_FORMAT
4668                  G1PPRL_BYTE_FORMAT
4669                  G1PPRL_BYTE_FORMAT,
4670                  type, bottom, end,
4671                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4672                  remset_bytes, strong_code_roots_bytes);
4673 
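       // Returning false tells the region iteration to continue with
       // the next region.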
4674   return false;
4675 }
4676 
4677 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4678   // Add the static memory usage to the remembered set sizes.
4679   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4680   // Print the footer of the output.
4681   _out->print_cr(G1PPRL_LINE_PREFIX);
4682   _out->print_cr(G1PPRL_LINE_PREFIX
4683                  " SUMMARY"
4684                  G1PPRL_SUM_MB_FORMAT("capacity")
4685                  G1PPRL_SUM_MB_PERC_FORMAT("used")
4686                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4687                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4688                  G1PPRL_SUM_MB_FORMAT("remset")
4689                  G1PPRL_SUM_MB_FORMAT("code-roots"),
4690                  bytes_to_mb(_total_capacity_bytes),
4691                  bytes_to_mb(_total_used_bytes),
4692                  perc(_total_used_bytes, _total_capacity_bytes),
4693                  bytes_to_mb(_total_prev_live_bytes),
4694                  perc(_total_prev_live_bytes, _total_capacity_bytes),
4695                  bytes_to_mb(_total_next_live_bytes),
4696                  perc(_total_next_live_bytes, _total_capacity_bytes),
4697                  bytes_to_mb(_total_remset_bytes),
4698                  bytes_to_mb(_total_strong_code_roots_bytes));
4699   _out->cr();
4700 }