1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/symbolTable.hpp"
  27 #include "gc_implementation/g1/concurrentMark.inline.hpp"
  28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  32 #include "gc_implementation/g1/g1Log.hpp"
  33 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  34 #include "gc_implementation/g1/g1RemSet.hpp"
  35 #include "gc_implementation/g1/heapRegion.inline.hpp"
  36 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  37 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  38 #include "gc_implementation/shared/vmGCOperations.hpp"
  39 #include "memory/genOopClosures.inline.hpp"
  40 #include "memory/referencePolicy.hpp"
  41 #include "memory/resourceArea.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "runtime/handles.inline.hpp"
  44 #include "runtime/java.hpp"
  45 #include "services/memTracker.hpp"
  46 
  47 // Concurrent marking bit map wrapper
  48 
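     // One bit in the map covers (1 << _shifter) heap words, so the map
     // holds (_bmWordSize >> _shifter) bits for the covered heap range.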
  49 CMBitMapRO::CMBitMapRO(int shifter) :
  50   _bm(),
  51   _shifter(shifter) {
  52   _bmStartWord = 0;
  53   _bmWordSize = 0;
  54 }
  55 
  56 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
  57                                                HeapWord* limit) const {
  58   // First we must round addr *up* to a possible object boundary.
  59   addr = (HeapWord*)align_size_up((intptr_t)addr,
  60                                   HeapWordSize << _shifter);
  61   size_t addrOffset = heapWordToOffset(addr);
  62   if (limit == NULL) {
  63     limit = _bmStartWord + _bmWordSize;
  64   }
  65   size_t limitOffset = heapWordToOffset(limit);
  66   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  67   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  68   assert(nextAddr >= addr, "get_next_one postcondition");
  69   assert(nextAddr == limit || isMarked(nextAddr),
  70          "get_next_one postcondition");
  71   return nextAddr;
  72 }
  73 
  74 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
  75                                                  HeapWord* limit) const {
  76   size_t addrOffset = heapWordToOffset(addr);
  77   if (limit == NULL) {
  78     limit = _bmStartWord + _bmWordSize;
  79   }
  80   size_t limitOffset = heapWordToOffset(limit);
  81   size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  82   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  83   assert(nextAddr >= addr, "get_next_one postcondition");
  84   assert(nextAddr == limit || !isMarked(nextAddr),
  85          "get_next_one postcondition");
  86   return nextAddr;
  87 }
  88 
  89 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  90   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  91   return (int) (diff >> _shifter);
  92 }
  93 
  94 #ifndef PRODUCT
  95 bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  96   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  97   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
  98          "size inconsistency");
  99   return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
 100          _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
 101 }
 102 #endif
 103 
 104 bool CMBitMap::allocate(ReservedSpace heap_rs) {
 105   _bmStartWord = (HeapWord*)(heap_rs.base());
 106   _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
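       // Reserve one bit per (1 << _shifter) heap words, converted to bytes;
       // the "+ 1" provides slack for the round-down in the shift.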
 107   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
 108                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
 109   if (!brs.is_reserved()) {
 110     warning("ConcurrentMark marking bit map allocation failure");
 111     return false;
 112   }
 113   MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
 114   // For now we'll just commit all of the bit map up front.
 115   // Later on we'll try to be more parsimonious with swap.
 116   if (!_virtual_space.initialize(brs, brs.size())) {
 117     warning("ConcurrentMark marking bit map backing store failure");
 118     return false;
 119   }
 120   assert(_virtual_space.committed_size() == brs.size(),
 121          "didn't reserve backing store for all of concurrent marking bit map?");
 122   _bm.set_map((uintptr_t*)_virtual_space.low());
 123   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
 124          _bmWordSize, "inconsistency in bit map sizing");
 125   _bm.set_size(_bmWordSize >> _shifter);
 126   return true;
 127 }
 128 
 129 void CMBitMap::clearAll() {
 130   _bm.clear();
 131   return;
 132 }
 133 
 134 void CMBitMap::markRange(MemRegion mr) {
 135   mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 136   assert(!mr.is_empty(), "unexpected empty region");
 137   assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 138           ((HeapWord *) mr.end())),
 139          "markRange memory region end is not card aligned");
 140   // convert address range into offset range
 141   _bm.at_put_range(heapWordToOffset(mr.start()),
 142                    heapWordToOffset(mr.end()), true);
 143 }
 144 
 145 void CMBitMap::clearRange(MemRegion mr) {
 146   mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 147   assert(!mr.is_empty(), "unexpected empty region");
 148   // convert address range into offset range
 149   _bm.at_put_range(heapWordToOffset(mr.start()),
 150                    heapWordToOffset(mr.end()), false);
 151 }
 152 
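     // Returns the first contiguous run of marked words at or after 'addr'
     // (clamped to 'end_addr') and clears the corresponding bits.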
 153 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
 154                                             HeapWord* end_addr) {
 155   HeapWord* start = getNextMarkedWordAddress(addr);
 156   start = MIN2(start, end_addr);
 157   HeapWord* end   = getNextUnmarkedWordAddress(start);
 158   end = MIN2(end, end_addr);
 159   assert(start <= end, "Consistency check");
 160   MemRegion mr(start, end);
 161   if (!mr.is_empty()) {
 162     clearRange(mr);
 163   }
 164   return mr;
 165 }
 166 
 167 CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
 168   _base(NULL), _cm(cm)
 169 #ifdef ASSERT
 170   , _drain_in_progress(false)
 171   , _drain_in_progress_yields(false)
 172 #endif
 173 {}
 174 
 175 bool CMMarkStack::allocate(size_t capacity) {
 176   // allocate a stack of the requisite depth
 177   ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
 178   if (!rs.is_reserved()) {
 179     warning("ConcurrentMark MarkStack allocation failure");
 180     return false;
 181   }
 182   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 183   if (!_virtual_space.initialize(rs, rs.size())) {
 184     warning("ConcurrentMark MarkStack backing store failure");
 185     // Release the virtual memory reserved for the marking stack
 186     rs.release();
 187     return false;
 188   }
 189   assert(_virtual_space.committed_size() == rs.size(),
 190          "Didn't reserve backing store for all of ConcurrentMark stack?");
 191   _base = (oop*) _virtual_space.low();
 192   setEmpty();
 193   _capacity = (jint) capacity;
 194   _saved_index = -1;
 195   _should_expand = false;
 196   NOT_PRODUCT(_max_depth = 0);
 197   return true;
 198 }
 199 
 200 void CMMarkStack::expand() {
 201   // Called during remark if we've overflowed the marking stack during marking.
 202   assert(isEmpty(), "stack should have been emptied while handling overflow");
 203   assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
 204   // Clear expansion flag
 205   _should_expand = false;
 206   if (_capacity == (jint) MarkStackSizeMax) {
 207     if (PrintGCDetails && Verbose) {
 208       gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
 209     }
 210     return;
 211   }
 212   // Double capacity if possible
 213   jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
 214   // Do not give up the existing stack until we have managed to
 215   // get the doubled capacity that we desired.
 216   ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
 217                                                            sizeof(oop)));
 218   if (rs.is_reserved()) {
 219     // Release the backing store associated with old stack
 220     _virtual_space.release();
 221     // Reinitialize virtual space for new stack
 222     if (!_virtual_space.initialize(rs, rs.size())) {
 223       fatal("Not enough swap for expanded marking stack capacity");
 224     }
 225     _base = (oop*)(_virtual_space.low());
 226     _index = 0;
 227     _capacity = new_capacity;
 228   } else {
 229     if (PrintGCDetails && Verbose) {
 230       // Failed to double capacity, continue;
 231       gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
 232                           SIZE_FORMAT"K to " SIZE_FORMAT"K",
 233                           _capacity / K, new_capacity / K);
 234     }
 235   }
 236 }
 237 
 238 void CMMarkStack::set_should_expand() {
 239   // If we're resetting the marking state because of a
 240   // marking stack overflow, record that we should, if
 241   // possible, expand the stack.
 242   _should_expand = _cm->has_overflown();
 243 }
 244 
 245 CMMarkStack::~CMMarkStack() {
 246   if (_base != NULL) {
 247     _base = NULL;
 248     _virtual_space.release();
 249   }
 250 }
 251 
 252 void CMMarkStack::par_push(oop ptr) {
 253   while (true) {
 254     if (isFull()) {
 255       _overflow = true;
 256       return;
 257     }
 258     // Otherwise...
 259     jint index = _index;
 260     jint next_index = index+1;
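         // Try to claim slot 'index' by atomically advancing _index; if the
         // CAS fails another thread pushed first, so retry with the new index.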
 261     jint res = Atomic::cmpxchg(next_index, &_index, index);
 262     if (res == index) {
 263       _base[index] = ptr;
 264       // Note that we don't maintain this atomically.  We could, but it
 265       // doesn't seem necessary.
 266       NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 267       return;
 268     }
 269     // Otherwise, we need to try again.
 270   }
 271 }
 272 
 273 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
 274   while (true) {
 275     if (isFull()) {
 276       _overflow = true;
 277       return;
 278     }
 279     // Otherwise...
 280     jint index = _index;
 281     jint next_index = index + n;
 282     if (next_index > _capacity) {
 283       _overflow = true;
 284       return;
 285     }
 286     jint res = Atomic::cmpxchg(next_index, &_index, index);
 287     if (res == index) {
 288       for (int i = 0; i < n; i++) {
 289         int  ind = index + i;
 290         assert(ind < _capacity, "By overflow test above.");
 291         _base[ind] = ptr_arr[i];
 292       }
 293       NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 294       return;
 295     }
 296     // Otherwise, we need to try again.
 297   }
 298 }
 299 
 300 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 301   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 302   jint start = _index;
 303   jint next_index = start + n;
 304   if (next_index > _capacity) {
 305     _overflow = true;
 306     return;
 307   }
 308   // Otherwise.
 309   _index = next_index;
 310   for (int i = 0; i < n; i++) {
 311     int ind = start + i;
 312     assert(ind < _capacity, "By overflow test above.");
 313     _base[ind] = ptr_arr[i];
 314   }
 315   NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 316 }
 317 
 318 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
 319   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 320   jint index = _index;
 321   if (index == 0) {
 322     *n = 0;
 323     return false;
 324   } else {
 325     int k = MIN2(max, index);
 326     jint  new_ind = index - k;
 327     for (int j = 0; j < k; j++) {
 328       ptr_arr[j] = _base[new_ind + j];
 329     }
 330     _index = new_ind;
 331     *n = k;
 332     return true;
 333   }
 334 }
 335 
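     // Pops entries and applies 'cl' to each object's fields. If 'yield_after'
     // is set, the drain may stop early at a yield point and return false to
     // indicate that the stack was not fully emptied.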
 336 template<class OopClosureClass>
 337 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
 338   assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
 339          || SafepointSynchronize::is_at_safepoint(),
 340          "Drain recursion must be yield-safe.");
 341   bool res = true;
 342   debug_only(_drain_in_progress = true);
 343   debug_only(_drain_in_progress_yields = yield_after);
 344   while (!isEmpty()) {
 345     oop newOop = pop();
 346     assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
 347     assert(newOop->is_oop(), "Expected an oop");
 348     assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
 349            "only grey objects on this stack");
 350     newOop->oop_iterate(cl);
 351     if (yield_after && _cm->do_yield_check()) {
 352       res = false;
 353       break;
 354     }
 355   }
 356   debug_only(_drain_in_progress = false);
 357   return res;
 358 }
 359 
 360 void CMMarkStack::note_start_of_gc() {
 361   assert(_saved_index == -1,
 362          "note_start_of_gc()/end_of_gc() bracketed incorrectly");
 363   _saved_index = _index;
 364 }
 365 
 366 void CMMarkStack::note_end_of_gc() {
 367   // This is intentionally a guarantee, instead of an assert. If we
 368   // accidentally add something to the mark stack during GC, it
 369   // will be a correctness issue so it's better if we crash. We'll
 370   // only check this once per GC anyway, so it won't be a performance
 371   // issue in any way.
 372   guarantee(_saved_index == _index,
 373             err_msg("saved index: %d index: %d", _saved_index, _index));
 374   _saved_index = -1;
 375 }
 376 
 377 void CMMarkStack::oops_do(OopClosure* f) {
 378   assert(_saved_index == _index,
 379          err_msg("saved index: %d index: %d", _saved_index, _index));
 380   for (int i = 0; i < _index; i += 1) {
 381     f->do_oop(&_base[i]);
 382   }
 383 }
 384 
 385 bool ConcurrentMark::not_yet_marked(oop obj) const {
 386   return _g1h->is_obj_ill(obj);
 387 }
 388 
 389 CMRootRegions::CMRootRegions() :
 390   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
 391   _should_abort(false),  _next_survivor(NULL) { }
 392 
 393 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
 394   _young_list = g1h->young_list();
 395   _cm = cm;
 396 }
 397 
 398 void CMRootRegions::prepare_for_scan() {
 399   assert(!scan_in_progress(), "pre-condition");
 400 
 401   // Currently, only survivors can be root regions.
 402   assert(_next_survivor == NULL, "pre-condition");
 403   _next_survivor = _young_list->first_survivor_region();
 404   _scan_in_progress = (_next_survivor != NULL);
 405   _should_abort = false;
 406 }
 407 
 408 HeapRegion* CMRootRegions::claim_next() {
 409   if (_should_abort) {
 410     // If someone has set the should_abort flag, we return NULL to
 411     // force the caller to bail out of their loop.
 412     return NULL;
 413   }
 414 
 415   // Currently, only survivors can be root regions.
 416   HeapRegion* res = _next_survivor;
 417   if (res != NULL) {
 418     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 419     // Read it again in case it changed while we were waiting for the lock.
 420     res = _next_survivor;
 421     if (res != NULL) {
 422       if (res == _young_list->last_survivor_region()) {
 423         // We just claimed the last survivor so store NULL to indicate
 424         // that we're done.
 425         _next_survivor = NULL;
 426       } else {
 427         _next_survivor = res->get_next_young_region();
 428       }
 429     } else {
 430       // Someone else claimed the last survivor while we were trying
 431       // to take the lock so nothing else to do.
 432     }
 433   }
 434   assert(res == NULL || res->is_survivor(), "post-condition");
 435 
 436   return res;
 437 }
 438 
 439 void CMRootRegions::scan_finished() {
 440   assert(scan_in_progress(), "pre-condition");
 441 
 442   // Currently, only survivors can be root regions.
 443   if (!_should_abort) {
 444     assert(_next_survivor == NULL, "we should have claimed all survivors");
 445   }
 446   _next_survivor = NULL;
 447 
 448   {
 449     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 450     _scan_in_progress = false;
 451     RootRegionScan_lock->notify_all();
 452   }
 453 }
 454 
 455 bool CMRootRegions::wait_until_scan_finished() {
 456   if (!scan_in_progress()) return false;
 457 
 458   {
 459     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 460     while (scan_in_progress()) {
 461       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 462     }
 463   }
 464   return true;
 465 }
 466 
 467 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 468 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 469 #endif // _MSC_VER
 470 
 471 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
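       // Use roughly one marking thread per four parallel GC threads
       // (rounded to nearest), but always at least one.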
 472   return MAX2((n_par_threads + 2) / 4, 1U);
 473 }
 474 
 475 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
 476   _g1h(g1h),
 477   _markBitMap1(MinObjAlignment - 1),
 478   _markBitMap2(MinObjAlignment - 1),
 479 
 480   _parallel_marking_threads(0),
 481   _max_parallel_marking_threads(0),
 482   _sleep_factor(0.0),
 483   _marking_task_overhead(1.0),
 484   _cleanup_sleep_factor(0.0),
 485   _cleanup_task_overhead(1.0),
 486   _cleanup_list("Cleanup List"),
 487   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
 488   _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
 489             CardTableModRefBS::card_shift,
 490             false /* in_resource_area*/),
 491 
 492   _prevMarkBitMap(&_markBitMap1),
 493   _nextMarkBitMap(&_markBitMap2),
 494 
 495   _markStack(this),
 496   // _finger set in set_non_marking_state
 497 
 498   _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
 499   // _active_tasks set in set_non_marking_state
 500   // _tasks set inside the constructor
 501   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
 502   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 503 
 504   _has_overflown(false),
 505   _concurrent(false),
 506   _has_aborted(false),
 507   _restart_for_overflow(false),
 508   _concurrent_marking_in_progress(false),
 509 
 510   // _verbose_level set below
 511 
 512   _init_times(),
 513   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 514   _cleanup_times(),
 515   _total_counting_time(0.0),
 516   _total_rs_scrub_time(0.0),
 517 
 518   _parallel_workers(NULL),
 519 
 520   _count_card_bitmaps(NULL),
 521   _count_marked_bytes(NULL),
 522   _completed_initialization(false) {
 523   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 524   if (verbose_level < no_verbose) {
 525     verbose_level = no_verbose;
 526   }
 527   if (verbose_level > high_verbose) {
 528     verbose_level = high_verbose;
 529   }
 530   _verbose_level = verbose_level;
 531 
 532   if (verbose_low()) {
 533     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
 534                            "heap end = "PTR_FORMAT, _heap_start, _heap_end);
 535   }
 536 
 537   if (!_markBitMap1.allocate(heap_rs)) {
 538     warning("Failed to allocate first CM bit map");
 539     return;
 540   }
 541   if (!_markBitMap2.allocate(heap_rs)) {
 542     warning("Failed to allocate second CM bit map");
 543     return;
 544   }
 545 
 546   // Create & start a ConcurrentMark thread.
 547   _cmThread = new ConcurrentMarkThread(this);
 548   assert(cmThread() != NULL, "CM Thread should have been created");
 549   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
 550 
 551   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 552   assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
 553   assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
 554 
 555   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 556   satb_qs.set_buffer_size(G1SATBBufferSize);
 557 
 558   _root_regions.init(_g1h, this);
 559 
 560   if (ConcGCThreads > ParallelGCThreads) {
 561     warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
 562             "than ParallelGCThreads (" UINTX_FORMAT ").",
 563             ConcGCThreads, ParallelGCThreads);
 564     return;
 565   }
 566   if (ParallelGCThreads == 0) {
 567     // if we are not running with any parallel GC threads we will not
 568     // spawn any marking threads either
 569     _parallel_marking_threads =       0;
 570     _max_parallel_marking_threads =   0;
 571     _sleep_factor             =     0.0;
 572     _marking_task_overhead    =     1.0;
 573   } else {
 574     if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
 575       // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
 576       // if both are set
 577       _sleep_factor             = 0.0;
 578       _marking_task_overhead    = 1.0;
 579     } else if (G1MarkingOverheadPercent > 0) {
 580       // We will calculate the number of parallel marking threads based
 581       // on a target overhead with respect to the soft real-time goal
 582       double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
 583       double overall_cm_overhead =
 584         (double) MaxGCPauseMillis * marking_overhead /
 585         (double) GCPauseIntervalMillis;
 586       double cpu_ratio = 1.0 / (double) os::processor_count();
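           // overall_cm_overhead is treated as a fraction of the machine's
           // total CPU capacity; dividing it by one processor's share
           // (cpu_ratio) gives the number of marking threads needed.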
 587       double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
 588       double marking_task_overhead =
 589         overall_cm_overhead / marking_thread_num *
 590                                                 (double) os::processor_count();
 591       double sleep_factor =
 592                          (1.0 - marking_task_overhead) / marking_task_overhead;
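           // With sleep factor s a worker sleeps s times as long as it just
           // ran, giving a duty cycle of 1 / (1 + s), i.e. marking_task_overhead.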
 593 
 594       FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
 595       _sleep_factor             = sleep_factor;
 596       _marking_task_overhead    = marking_task_overhead;
 597     } else {
 598       // Calculate the number of parallel marking threads by scaling
 599       // the number of parallel GC threads.
 600       uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
 601       FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
 602       _sleep_factor             = 0.0;
 603       _marking_task_overhead    = 1.0;
 604     }
 605 
 606     assert(ConcGCThreads > 0, "Should have been set");
 607     _parallel_marking_threads = (uint) ConcGCThreads;
 608     _max_parallel_marking_threads = _parallel_marking_threads;
 609 
 610     if (parallel_marking_threads() > 1) {
 611       _cleanup_task_overhead = 1.0;
 612     } else {
 613       _cleanup_task_overhead = marking_task_overhead();
 614     }
 615     _cleanup_sleep_factor =
 616                      (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
 617 
 618 #if 0
 619     gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
 620     gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
 621     gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
 622     gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
 623     gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
 624 #endif
 625 
 626     guarantee(parallel_marking_threads() > 0, "peace of mind");
 627     _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
 628          _max_parallel_marking_threads, false, true);
 629     if (_parallel_workers == NULL) {
 630       vm_exit_during_initialization("Failed necessary allocation.");
 631     } else {
 632       _parallel_workers->initialize_workers();
 633     }
 634   }
 635 
 636   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 637     uintx mark_stack_size =
 638       MIN2(MarkStackSizeMax,
 639           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
 640     // Verify that the calculated value for MarkStackSize is in range.
 641     // It would be nice to use the private utility routine from Arguments.
 642     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 643       warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
 644               "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
 645               mark_stack_size, (uintx) 1, MarkStackSizeMax);
 646       return;
 647     }
 648     FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
 649   } else {
 650     // Verify MarkStackSize is in range.
 651     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 652       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 653         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 654           warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
 655                   "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
 656                   MarkStackSize, (uintx) 1, MarkStackSizeMax);
 657           return;
 658         }
 659       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 660         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 661           warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
 662                   " or for MarkStackSizeMax (" UINTX_FORMAT ")",
 663                   MarkStackSize, MarkStackSizeMax);
 664           return;
 665         }
 666       }
 667     }
 668   }
 669 
 670   if (!_markStack.allocate(MarkStackSize)) {
 671     warning("Failed to allocate CM marking stack");
 672     return;
 673   }
 674 
 675   _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
 676   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
 677 
 678   _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
 679   _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
 680 
 681   BitMap::idx_t card_bm_size = _card_bm.size();
 682 
 683   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 684   _active_tasks = _max_worker_id;
 685 
 686   size_t max_regions = (size_t) _g1h->max_regions();
 687   for (uint i = 0; i < _max_worker_id; ++i) {
 688     CMTaskQueue* task_queue = new CMTaskQueue();
 689     task_queue->initialize();
 690     _task_queues->register_queue(i, task_queue);
 691 
 692     _count_card_bitmaps[i] = BitMap(card_bm_size, false);
 693     _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
 694 
 695     _tasks[i] = new CMTask(i, this,
 696                            _count_marked_bytes[i],
 697                            &_count_card_bitmaps[i],
 698                            task_queue, _task_queues);
 699 
 700     _accum_task_vtime[i] = 0.0;
 701   }
 702 
 703   // Calculate the card number for the bottom of the heap. Used
 704   // in biasing indexes into the accounting card bitmaps.
 705   _heap_bottom_card_num =
 706     intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
 707                                 CardTableModRefBS::card_shift);
 708 
 709   // Clear all the liveness counting data
 710   clear_all_count_data();
 711 
 712   // so that the call below can read a sensible value
 713   _heap_start = (HeapWord*) heap_rs.base();
 714   set_non_marking_state();
 715   _completed_initialization = true;
 716 }
 717 
 718 void ConcurrentMark::update_g1_committed(bool force) {
 719   // If concurrent marking is not in progress, then we do not need to
 720   // update _heap_end.
 721   if (!concurrent_marking_in_progress() && !force) return;
 722 
 723   MemRegion committed = _g1h->g1_committed();
 724   assert(committed.start() == _heap_start, "start shouldn't change");
 725   HeapWord* new_end = committed.end();
 726   if (new_end > _heap_end) {
 727     // The heap has been expanded.
 728 
 729     _heap_end = new_end;
 730   }
 731   // Notice that the heap can also shrink. However, this only happens
 732   // during a Full GC (at least currently) and the entire marking
 733   // phase will bail out and the task will not be restarted. So, let's
 734   // do nothing.
 735 }
 736 
 737 void ConcurrentMark::reset() {
 738   // Starting values for these two. This should be called in a STW
 739   // phase. CM will be notified of any future g1_committed
 740   // expansions at the end of evacuation pauses, when tasks are
 741   // inactive.
 742   MemRegion committed = _g1h->g1_committed();
 743   _heap_start = committed.start();
 744   _heap_end   = committed.end();
 745 
 746   // Separated the asserts so that we know which one fires.
 747   assert(_heap_start != NULL, "heap bounds should look ok");
 748   assert(_heap_end != NULL, "heap bounds should look ok");
 749   assert(_heap_start < _heap_end, "heap bounds should look ok");
 750 
 751   // Reset all the marking data structures and any necessary flags
 752   reset_marking_state();
 753 
 754   if (verbose_low()) {
 755     gclog_or_tty->print_cr("[global] resetting");
 756   }
 757 
 758   // We do reset all of them, since different phases will use
 759   // different number of active threads. So, it's easiest to have all
 760   // of them ready.
 761   for (uint i = 0; i < _max_worker_id; ++i) {
 762     _tasks[i]->reset(_nextMarkBitMap);
 763   }
 764 
 765   // we need this to make sure that the flag is on during the evac
 766   // pause with initial mark piggy-backed
 767   set_concurrent_marking_in_progress();
 768 }
 769 
 770 
 771 void ConcurrentMark::reset_marking_state(bool clear_overflow) {
 772   _markStack.set_should_expand();
 773   _markStack.setEmpty();        // Also clears the _markStack overflow flag
 774   if (clear_overflow) {
 775     clear_has_overflown();
 776   } else {
 777     assert(has_overflown(), "pre-condition");
 778   }
 779   _finger = _heap_start;
 780 
 781   for (uint i = 0; i < _max_worker_id; ++i) {
 782     CMTaskQueue* queue = _task_queues->queue(i);
 783     queue->set_empty();
 784   }
 785 }
 786 
 787 void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) {
 788   assert(active_tasks <= _max_worker_id, "we should not have more");
 789 
 790   _active_tasks = active_tasks;
 791   // Need to update the three data structures below according to the
 792   // number of active threads for this phase.
 793   _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
 794   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 795   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 796 
 797   _concurrent = concurrent;
 798   // We propagate this to all tasks, not just the active ones.
 799   for (uint i = 0; i < _max_worker_id; ++i)
 800     _tasks[i]->set_concurrent(concurrent);
 801 
 802   if (concurrent) {
 803     set_concurrent_marking_in_progress();
 804   } else {
 805     // We currently assume that the concurrent flag has been set to
 806     // false before we start remark. At this point we should also be
 807     // in a STW phase.
 808     assert(!concurrent_marking_in_progress(), "invariant");
 809     assert(_finger == _heap_end, "only way to get here");
 810     update_g1_committed(true);
 811   }
 812 }
 813 
 814 void ConcurrentMark::set_non_marking_state() {
 815   // We set the global marking state to some default values when we're
 816   // not doing marking.
 817   reset_marking_state();
 818   _active_tasks = 0;
 819   clear_concurrent_marking_in_progress();
 820 }
 821 
 822 ConcurrentMark::~ConcurrentMark() {
 823   // The ConcurrentMark instance is never freed.
 824   ShouldNotReachHere();
 825 }
 826 
 827 void ConcurrentMark::clearNextBitmap() {
 828   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 829   G1CollectorPolicy* g1p = g1h->g1_policy();
 830 
 831   // Make sure that the concurrent mark thread looks to still be in
 832   // the current cycle.
 833   guarantee(cmThread()->during_cycle(), "invariant");
 834 
 835   // We are finishing up the current cycle by clearing the next
 836   // marking bitmap and getting it ready for the next cycle. During
 837   // this time no other cycle can start. So, let's make sure that this
 838   // is the case.
 839   guarantee(!g1h->mark_in_progress(), "invariant");
 840 
 841   // clear the mark bitmap (no grey objects to start with).
 842   // We need to do this in chunks and offer to yield in between
 843   // each chunk.
 844   HeapWord* start  = _nextMarkBitMap->startWord();
 845   HeapWord* end    = _nextMarkBitMap->endWord();
 846   HeapWord* cur    = start;
 847   size_t chunkSize = M;
 848   while (cur < end) {
 849     HeapWord* next = cur + chunkSize;
 850     if (next > end) {
 851       next = end;
 852     }
 853     MemRegion mr(cur,next);
 854     _nextMarkBitMap->clearRange(mr);
 855     cur = next;
 856     do_yield_check();
 857 
 858     // Repeat the asserts from above. We'll do them as asserts here to
 859     // minimize their overhead on the product. However, we'll have
 860     // them as guarantees at the beginning / end of the bitmap
 861     // clearing to get some checking in the product.
 862     assert(cmThread()->during_cycle(), "invariant");
 863     assert(!g1h->mark_in_progress(), "invariant");
 864   }
 865 
 866   // Clear the liveness counting data
 867   clear_all_count_data();
 868 
 869   // Repeat the asserts from above.
 870   guarantee(cmThread()->during_cycle(), "invariant");
 871   guarantee(!g1h->mark_in_progress(), "invariant");
 872 }
 873 
 874 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 875 public:
 876   bool doHeapRegion(HeapRegion* r) {
 877     if (!r->continuesHumongous()) {
 878       r->note_start_of_marking();
 879     }
 880     return false;
 881   }
 882 };
 883 
 884 void ConcurrentMark::checkpointRootsInitialPre() {
 885   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 886   G1CollectorPolicy* g1p = g1h->g1_policy();
 887 
 888   _has_aborted = false;
 889 
 890 #ifndef PRODUCT
 891   if (G1PrintReachableAtInitialMark) {
 892     print_reachable("at-cycle-start",
 893                     VerifyOption_G1UsePrevMarking, true /* all */);
 894   }
 895 #endif
 896 
 897   // Initialise marking structures. This has to be done in a STW phase.
 898   reset();
 899 
 900   // For each region note start of marking.
 901   NoteStartOfMarkHRClosure startcl;
 902   g1h->heap_region_iterate(&startcl);
 903 }
 904 
 905 
 906 void ConcurrentMark::checkpointRootsInitialPost() {
 907   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 908 
 909   // If we force an overflow during remark, the remark operation will
 910   // actually abort and we'll restart concurrent marking. If we always
 911   // force an overflow during remark we'll never actually complete the
 912   // marking phase. So, we initialize this here, at the start of the
 913   // cycle, so that the number of remaining forced overflows decreases at
 914   // every remark and we'll eventually not need to cause one.
 915   force_overflow_stw()->init();
 916 
 917   // Start Concurrent Marking weak-reference discovery.
 918   ReferenceProcessor* rp = g1h->ref_processor_cm();
 919   // enable ("weak") refs discovery
 920   rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
 921   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 922 
 923   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 924   // This is the start of the marking cycle. We expect all
 925   // threads to have SATB queues with active set to false.
 926   satb_mq_set.set_active_all_threads(true, /* new active value */
 927                                      false /* expected_active */);
 928 
 929   _root_regions.prepare_for_scan();
 930 
 931   // update_g1_committed() will be called at the end of an evac pause
 932   // when marking is on. So, it's also called at the end of the
 933   // initial-mark pause to update the heap end, if the heap expands
 934   // during it. No need to call it here.
 935 }
 936 
 937 /*
 938  * Notice that in the next two methods, we actually leave the STS
 939  * during the barrier sync and join it immediately afterwards. If we
 940  * do not do this, the following deadlock can occur: one thread could
 941  * be in the barrier sync code, waiting for the other thread to also
 942  * sync up, whereas another one could be trying to yield, while also
 943  * waiting for the other threads to sync up too.
 944  *
 945  * Note, however, that this code is also used during remark and in
 946  * this case we should not attempt to leave / enter the STS, otherwise
 947  * we'll either hit an assert (debug / fastdebug) or deadlock
 948  * (product). So we should only leave / enter the STS if we are
 949  * operating concurrently.
 950  *
 951  * Because the thread that does the sync barrier has left the STS, it
 952  * is possible for it to be suspended while a Full GC or an evacuation
 953  * pause occurs. This is actually safe, since entering the sync
 954  * barrier is one of the last things do_marking_step() does, and it
 955  * doesn't manipulate any data structures afterwards.
 956  */
 957 
 958 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 959   if (verbose_low()) {
 960     gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
 961   }
 962 
 963   if (concurrent()) {
 964     ConcurrentGCThread::stsLeave();
 965   }
 966   _first_overflow_barrier_sync.enter();
 967   if (concurrent()) {
 968     ConcurrentGCThread::stsJoin();
 969   }
 970   // at this point everyone should have synced up and not be doing any
 971   // more work
 972 
 973   if (verbose_low()) {
 974     gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
 975   }
 976 
 977   // let the task associated with worker 0 do this
 978   if (worker_id == 0) {
 979     // task 0 is responsible for clearing the global data structures
 980     // We should be here because of an overflow. During STW we should
 981     // not clear the overflow flag since we rely on it being true when
 982     // we exit this method to abort the pause and restart concurrent
 983     // marking.
 984     reset_marking_state(concurrent() /* clear_overflow */);
 985     force_overflow()->update();
 986 
 987     if (G1Log::fine()) {
 988       gclog_or_tty->date_stamp(PrintGCDateStamps);
 989       gclog_or_tty->stamp(PrintGCTimeStamps);
 990       gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
 991     }
 992   }
 993 
 994   // after this, each task should reset its own data structures and
 995   // then go into the second barrier
 996 }
 997 
 998 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 999   if (verbose_low()) {
1000     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1001   }
1002 
1003   if (concurrent()) {
1004     ConcurrentGCThread::stsLeave();
1005   }
1006   _second_overflow_barrier_sync.enter();
1007   if (concurrent()) {
1008     ConcurrentGCThread::stsJoin();
1009   }
1010   // at this point everything should be re-initialised and ready to go
1011 
1012   if (verbose_low()) {
1013     gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
1014   }
1015 }
1016 
1017 #ifndef PRODUCT
1018 void ForceOverflowSettings::init() {
1019   _num_remaining = G1ConcMarkForceOverflow;
1020   _force = false;
1021   update();
1022 }
1023 
1024 void ForceOverflowSettings::update() {
1025   if (_num_remaining > 0) {
1026     _num_remaining -= 1;
1027     _force = true;
1028   } else {
1029     _force = false;
1030   }
1031 }
1032 
1033 bool ForceOverflowSettings::should_force() {
1034   if (_force) {
1035     _force = false;
1036     return true;
1037   } else {
1038     return false;
1039   }
1040 }
1041 #endif // !PRODUCT
1042 
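     // Gang task executed by the concurrent marking workers. Each worker runs
     // its CMTask in repeated do_marking_step() invocations, optionally
     // sleeping between steps according to the sleep factor, until marking
     // completes or is aborted.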
1043 class CMConcurrentMarkingTask: public AbstractGangTask {
1044 private:
1045   ConcurrentMark*       _cm;
1046   ConcurrentMarkThread* _cmt;
1047 
1048 public:
1049   void work(uint worker_id) {
1050     assert(Thread::current()->is_ConcurrentGC_thread(),
1051            "this should only be done by a conc GC thread");
1052     ResourceMark rm;
1053 
1054     double start_vtime = os::elapsedVTime();
1055 
1056     ConcurrentGCThread::stsJoin();
1057 
1058     assert(worker_id < _cm->active_tasks(), "invariant");
1059     CMTask* the_task = _cm->task(worker_id);
1060     the_task->record_start_time();
1061     if (!_cm->has_aborted()) {
1062       do {
1063         double start_vtime_sec = os::elapsedVTime();
1064         double start_time_sec = os::elapsedTime();
1065         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1066 
1067         the_task->do_marking_step(mark_step_duration_ms,
1068                                   true /* do_stealing    */,
1069                                   true /* do_termination */);
1070 
1071         double end_time_sec = os::elapsedTime();
1072         double end_vtime_sec = os::elapsedVTime();
1073         double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
1074         double elapsed_time_sec = end_time_sec - start_time_sec;
1075         _cm->clear_has_overflown();
1076 
1077         bool ret = _cm->do_yield_check(worker_id);
1078 
1079         jlong sleep_time_ms;
1080         if (!_cm->has_aborted() && the_task->has_aborted()) {
1081           sleep_time_ms =
1082             (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
1083           ConcurrentGCThread::stsLeave();
1084           os::sleep(Thread::current(), sleep_time_ms, false);
1085           ConcurrentGCThread::stsJoin();
1086         }
1087         double end_time2_sec = os::elapsedTime();
1088         double elapsed_time2_sec = end_time2_sec - start_time_sec;
1089 
1090 #if 0
1091           gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
1092                                  "overhead %1.4lf",
1093                                  elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
1094                                  the_task->conc_overhead(os::elapsedTime()) * 8.0);
1095           gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
1096                                  elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
1097 #endif
1098       } while (!_cm->has_aborted() && the_task->has_aborted());
1099     }
1100     the_task->record_end_time();
1101     guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1102 
1103     ConcurrentGCThread::stsLeave();
1104 
1105     double end_vtime = os::elapsedVTime();
1106     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
1107   }
1108 
1109   CMConcurrentMarkingTask(ConcurrentMark* cm,
1110                           ConcurrentMarkThread* cmt) :
1111       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1112 
1113   ~CMConcurrentMarkingTask() { }
1114 };
1115 
1116 // Calculates the number of active workers for a concurrent
1117 // phase.
1118 uint ConcurrentMark::calc_parallel_marking_threads() {
1119   if (G1CollectedHeap::use_parallel_gc_threads()) {
1120     uint n_conc_workers = 0;
1121     if (!UseDynamicNumberOfGCThreads ||
1122         (!FLAG_IS_DEFAULT(ConcGCThreads) &&
1123          !ForceDynamicNumberOfGCThreads)) {
1124       n_conc_workers = max_parallel_marking_threads();
1125     } else {
1126       n_conc_workers =
1127         AdaptiveSizePolicy::calc_default_active_workers(
1128                                      max_parallel_marking_threads(),
1129                                      1, /* Minimum workers */
1130                                      parallel_marking_threads(),
1131                                      Threads::number_of_non_daemon_threads());
1132       // Don't scale down "n_conc_workers" by scale_parallel_threads() because
1133       // that scaling has already gone into "_max_parallel_marking_threads".
1134     }
1135     assert(n_conc_workers > 0, "Always need at least 1");
1136     return n_conc_workers;
1137   }
1138   // If we are not running with any parallel GC threads we will not
1139   // have spawned any marking threads either. Hence the number of
1140   // concurrent workers should be 0.
1141   return 0;
1142 }
1143 
1144 void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
1145   // Currently, only survivors can be root regions.
1146   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
1147   G1RootRegionScanClosure cl(_g1h, this, worker_id);
1148 
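       // Every object in a root region is considered live; walk the region
       // linearly and apply the scan closure to each object's fields.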
1149   const uintx interval = PrefetchScanIntervalInBytes;
1150   HeapWord* curr = hr->bottom();
1151   const HeapWord* end = hr->top();
1152   while (curr < end) {
1153     Prefetch::read(curr, interval);
1154     oop obj = oop(curr);
1155     int size = obj->oop_iterate(&cl);
1156     assert(size == obj->size(), "sanity");
1157     curr += size;
1158   }
1159 }
1160 
1161 class CMRootRegionScanTask : public AbstractGangTask {
1162 private:
1163   ConcurrentMark* _cm;
1164 
1165 public:
1166   CMRootRegionScanTask(ConcurrentMark* cm) :
1167     AbstractGangTask("Root Region Scan"), _cm(cm) { }
1168 
1169   void work(uint worker_id) {
1170     assert(Thread::current()->is_ConcurrentGC_thread(),
1171            "this should only be done by a conc GC thread");
1172 
1173     CMRootRegions* root_regions = _cm->root_regions();
1174     HeapRegion* hr = root_regions->claim_next();
1175     while (hr != NULL) {
1176       _cm->scanRootRegion(hr, worker_id);
1177       hr = root_regions->claim_next();
1178     }
1179   }
1180 };
1181 
1182 void ConcurrentMark::scanRootRegions() {
1183   // scan_in_progress() will have been set to true only if there was
1184   // at least one root region to scan. So, if it's false, we
1185   // should not attempt to do any further work.
1186   if (root_regions()->scan_in_progress()) {
1187     _parallel_marking_threads = calc_parallel_marking_threads();
1188     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1189            "Maximum number of marking threads exceeded");
1190     uint active_workers = MAX2(1U, parallel_marking_threads());
1191 
1192     CMRootRegionScanTask task(this);
1193     if (use_parallel_marking_threads()) {
1194       _parallel_workers->set_active_workers((int) active_workers);
1195       _parallel_workers->run_task(&task);
1196     } else {
1197       task.work(0);
1198     }
1199 
1200     // It's possible that has_aborted() is true here without actually
1201     // aborting the survivor scan earlier. This is OK as it's
1202     // mainly used for sanity checking.
1203     root_regions()->scan_finished();
1204   }
1205 }
1206 
1207 void ConcurrentMark::markFromRoots() {
1208   // we might be tempted to assert that:
1209   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1210   //        "inconsistent argument?");
1211   // However that wouldn't be right, because it's possible that
1212   // a safepoint is indeed in progress as a younger generation
1213   // stop-the-world GC happens even as we mark in this generation.
1214 
1215   _restart_for_overflow = false;
1216   force_overflow_conc()->init();
1217 
1218   // _g1h has _n_par_threads
1219   _parallel_marking_threads = calc_parallel_marking_threads();
1220   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1221     "Maximum number of marking threads exceeded");
1222 
1223   uint active_workers = MAX2(1U, parallel_marking_threads());
1224 
1225   // Parallel task terminator is set in "set_phase()"
1226   set_phase(active_workers, true /* concurrent */);
1227 
1228   CMConcurrentMarkingTask markingTask(this, cmThread());
1229   if (use_parallel_marking_threads()) {
1230     _parallel_workers->set_active_workers((int)active_workers);
1231     // Don't set _n_par_threads because it affects MT in process_strong_roots()
1232     // and the decisions on that MT processing are made elsewhere.
1233     assert(_parallel_workers->active_workers() > 0, "Should have been set");
1234     _parallel_workers->run_task(&markingTask);
1235   } else {
1236     markingTask.work(0);
1237   }
1238   print_stats();
1239 }
1240 
1241 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1242   // world is stopped at this checkpoint
1243   assert(SafepointSynchronize::is_at_safepoint(),
1244          "world should be stopped");
1245 
1246   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1247 
1248   // If a full collection has happened, we shouldn't do this.
1249   if (has_aborted()) {
1250     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1251     return;
1252   }
1253 
1254   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1255 
1256   if (VerifyDuringGC) {
1257     HandleMark hm;  // handle scope
1258     gclog_or_tty->print(" VerifyDuringGC:(before)");
1259     Universe::heap()->prepare_for_verify();
1260     Universe::verify(/* silent */ false,
1261                      /* option */ VerifyOption_G1UsePrevMarking);
1262   }
1263 
1264   G1CollectorPolicy* g1p = g1h->g1_policy();
1265   g1p->record_concurrent_mark_remark_start();
1266 
1267   double start = os::elapsedTime();
1268 
1269   checkpointRootsFinalWork();
1270 
1271   double mark_work_end = os::elapsedTime();
1272 
1273   weakRefsWork(clear_all_soft_refs);
1274 
1275   if (has_overflown()) {
1276     // Oops.  We overflowed.  Restart concurrent marking.
1277     _restart_for_overflow = true;
1278     if (G1TraceMarkStackOverflow) {
1279       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1280     }
1281 
1282     // Verify the heap w.r.t. the previous marking bitmap.
1283     if (VerifyDuringGC) {
1284       HandleMark hm;  // handle scope
1285       gclog_or_tty->print(" VerifyDuringGC:(overflow)");
1286       Universe::heap()->prepare_for_verify();
1287       Universe::verify(/* silent */ false,
1288                        /* option */ VerifyOption_G1UsePrevMarking);
1289     }
1290 
1291     // Clear the marking state because we will be restarting
1292     // marking due to overflowing the global mark stack.
1293     reset_marking_state();
1294   } else {
1295     // Aggregate the per-task counting data that we have accumulated
1296     // while marking.
1297     aggregate_count_data();
1298 
1299     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1300     // We're done with marking.
1301     // This is the end of the marking cycle. We expect all
1302     // threads to have SATB queues with active set to true.
1303     satb_mq_set.set_active_all_threads(false, /* new active value */
1304                                        true /* expected_active */);
1305 
1306     if (VerifyDuringGC) {
1307       HandleMark hm;  // handle scope
1308       gclog_or_tty->print(" VerifyDuringGC:(after)");
1309       Universe::heap()->prepare_for_verify();
1310       Universe::verify(/* silent */ false,
1311                        /* option */ VerifyOption_G1UseNextMarking);
1312     }
1313     assert(!restart_for_overflow(), "sanity");
1314     // Completely reset the marking state since marking completed
1315     set_non_marking_state();
1316   }
1317 
1318   // Expand the marking stack, if we have to and if we can.
1319   if (_markStack.should_expand()) {
1320     _markStack.expand();
1321   }
1322 
1323 #if VERIFY_OBJS_PROCESSED
1324   _scan_obj_cl.objs_processed = 0;
1325   ThreadLocalObjQueue::objs_enqueued = 0;
1326 #endif
1327 
1328   // Statistics
1329   double now = os::elapsedTime();
1330   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1331   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1332   _remark_times.add((now - start) * 1000.0);
1333 
1334   g1p->record_concurrent_mark_remark_end();
1335 }
1336 
1337 // Base class of the closures that finalize and verify the
1338 // liveness counting data.
1339 class CMCountDataClosureBase: public HeapRegionClosure {
1340 protected:
1341   G1CollectedHeap* _g1h;
1342   ConcurrentMark* _cm;
1343   CardTableModRefBS* _ct_bs;
1344 
1345   BitMap* _region_bm;
1346   BitMap* _card_bm;
1347 
1348   // Takes a region that's not empty (i.e., it has at least one
1349   // live object in it) and sets its corresponding bit on the region
1350   // bitmap to 1. If the region is "starts humongous" it will also set
1351   // to 1 the bits on the region bitmap that correspond to its
1352   // associated "continues humongous" regions.
1353   void set_bit_for_region(HeapRegion* hr) {
1354     assert(!hr->continuesHumongous(), "should have filtered those out");
1355 
1356     BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1357     if (!hr->startsHumongous()) {
1358       // Normal (non-humongous) case: just set the bit.
1359       _region_bm->par_at_put(index, true);
1360     } else {
1361       // Starts humongous case: calculate how many regions are part of
1362       // this humongous region and then set the bit range.
1363       BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
1364       _region_bm->par_at_put_range(index, end_index, true);
1365     }
1366   }
1367 
1368 public:
1369   CMCountDataClosureBase(G1CollectedHeap* g1h,
1370                          BitMap* region_bm, BitMap* card_bm):
1371     _g1h(g1h), _cm(g1h->concurrent_mark()),
1372     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
1373     _region_bm(region_bm), _card_bm(card_bm) { }
1374 };
1375 
1376 // Closure that calculates the # live objects per region. Used
1377 // for verification purposes during the cleanup pause.
1378 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1379   CMBitMapRO* _bm;
1380   size_t _region_marked_bytes;
1381 
1382 public:
1383   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1384                          BitMap* region_bm, BitMap* card_bm) :
1385     CMCountDataClosureBase(g1h, region_bm, card_bm),
1386     _bm(bm), _region_marked_bytes(0) { }
1387 
1388   bool doHeapRegion(HeapRegion* hr) {
1389 
1390     if (hr->continuesHumongous()) {
1391       // We will ignore these here and process them when their
1392       // associated "starts humongous" region is processed (see
1393     // set_bit_for_region()). Note that we cannot rely on their
1394       // associated "starts humongous" region to have their bit set to
1395       // 1 since, due to the region chunking in the parallel region
1396       // iteration, a "continues humongous" region might be visited
1397       // before its associated "starts humongous".
1398       return false;
1399     }
1400 
1401     HeapWord* ntams = hr->next_top_at_mark_start();
1402     HeapWord* start = hr->bottom();
1403 
1404     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1405            err_msg("Preconditions not met - "
1406                    "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1407                    start, ntams, hr->end()));
1408 
1409     // Find the first marked object at or after "start".
1410     start = _bm->getNextMarkedWordAddress(start, ntams);
1411 
1412     size_t marked_bytes = 0;
1413 
1414     while (start < ntams) {
1415       oop obj = oop(start);
1416       int obj_sz = obj->size();
1417       HeapWord* obj_end = start + obj_sz;
1418 
1419       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1420       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1421 
1422       // Note: if we're looking at the last region in the heap, obj_end
1423       // could actually be just beyond the end of the heap; end_idx
1424       // will then correspond to a (non-existent) card that is also
1425       // just beyond the heap.
1426       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1427         // end of object is not card aligned - increment to cover
1428         // all the cards spanned by the object
1429         end_idx += 1;
1430       }
1431 
1432       // Set the bits in the card BM for the cards spanned by this object.
1433       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1434 
1435       // Add the size of this object to the number of marked bytes.
1436       marked_bytes += (size_t)obj_sz * HeapWordSize;
1437 
1438       // Find the next marked object after this one.
1439       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1440     }
1441 
1442     // Mark the allocated-since-marking portion...
1443     HeapWord* top = hr->top();
1444     if (ntams < top) {
1445       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1446       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1447 
1448       // Note: if we're looking at the last region in the heap, top
1449       // could actually be just beyond the end of the heap; end_idx
1450       // will then correspond to a (non-existent) card that is also
1451       // just beyond the heap.
1452       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1453         // top is not card aligned - increment to cover
1454         // all the cards spanned by the range [ntams, top)
1455         end_idx += 1;
1456       }
1457       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1458 
1459       // This definitely means the region has live objects.
1460       set_bit_for_region(hr);
1461     }
1462 
1463     // Update the live region bitmap.
1464     if (marked_bytes > 0) {
1465       set_bit_for_region(hr);
1466     }
1467 
1468     // Set the marked bytes for the current region so that
1469     // it can be queried by a calling verification routine.
1470     _region_marked_bytes = marked_bytes;
1471 
1472     return false;
1473   }
1474 
1475   size_t region_marked_bytes() const { return _region_marked_bytes; }
1476 };
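
     // A rough usage sketch (it mirrors how VerifyLiveObjectDataHRClosure
     // below drives this closure; the names come from that code):
     //
     //   CalcLiveObjectsClosure calc_cl(g1h->concurrent_mark()->nextMarkBitMap(),
     //                                  g1h, exp_region_bm, exp_card_bm);
     //   calc_cl.doHeapRegion(hr);     // fills the expected bitmaps for hr
     //   size_t exp_marked_bytes = calc_cl.region_marked_bytes();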
1477 
1478 // Heap region closure used for verifying the counting data
1479 // that was accumulated concurrently and aggregated during
1480 // the remark pause. This closure is applied to the heap
1481 // regions during the STW cleanup pause.
1482 
1483 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1484   G1CollectedHeap* _g1h;
1485   ConcurrentMark* _cm;
1486   CalcLiveObjectsClosure _calc_cl;
1487   BitMap* _region_bm;   // Region BM to be verified
1488   BitMap* _card_bm;     // Card BM to be verified
1489   bool _verbose;        // verbose output?
1490 
1491   BitMap* _exp_region_bm; // Expected Region BM values
1492   BitMap* _exp_card_bm;   // Expected card BM values
1493 
1494   int _failures;
1495 
1496 public:
1497   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1498                                 BitMap* region_bm,
1499                                 BitMap* card_bm,
1500                                 BitMap* exp_region_bm,
1501                                 BitMap* exp_card_bm,
1502                                 bool verbose) :
1503     _g1h(g1h), _cm(g1h->concurrent_mark()),
1504     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1505     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1506     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1507     _failures(0) { }
1508 
1509   int failures() const { return _failures; }
1510 
1511   bool doHeapRegion(HeapRegion* hr) {
1512     if (hr->continuesHumongous()) {
1513       // We will ignore these here and process them when their
1514       // associated "starts humongous" region is processed (see
1515       // set_bit_for_region()). Note that we cannot rely on their
1516       // associated "starts humongous" region to have its bit set to
1517       // 1 since, due to the region chunking in the parallel region
1518       // iteration, a "continues humongous" region might be visited
1519       // before its associated "starts humongous" region.
1520       return false;
1521     }
1522 
1523     int failures = 0;
1524 
1525     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1526     // this region and set the corresponding bits in the expected region
1527     // and card bitmaps.
1528     bool res = _calc_cl.doHeapRegion(hr);
1529     assert(res == false, "should be continuing");
1530 
1531     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1532                     Mutex::_no_safepoint_check_flag);
1533 
1534     // Verify the marked bytes for this region.
1535     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1536     size_t act_marked_bytes = hr->next_marked_bytes();
1537 
1538     // We're not OK if expected marked bytes > actual marked bytes. It means
1539     // we have missed accounting for some objects during the actual marking.
1540     if (exp_marked_bytes > act_marked_bytes) {
1541       if (_verbose) {
1542         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1543                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1544                                hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
1545       }
1546       failures += 1;
1547     }
1548 
1549     // Verify the bit, for this region, in the actual and expected
1550     // (which was just calculated) region bit maps.
1551     // We're not OK if the bit in the calculated expected region
1552     // bitmap is set and the bit in the actual region bitmap is not.
1553     BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1554 
1555     bool expected = _exp_region_bm->at(index);
1556     bool actual = _region_bm->at(index);
1557     if (expected && !actual) {
1558       if (_verbose) {
1559         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1560                                "expected: %s, actual: %s",
1561                                hr->hrs_index(),
1562                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1563       }
1564       failures += 1;
1565     }
1566 
1567     // Verify that the card bit maps for the cards spanned by the current
1568     // region match. We have an error if we have a set bit in the expected
1569     // bit map and the corresponding bit in the actual bitmap is not set.
1570 
1571     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1572     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1573 
1574     for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
1575       expected = _exp_card_bm->at(i);
1576       actual = _card_bm->at(i);
1577 
1578       if (expected && !actual) {
1579         if (_verbose) {
1580           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1581                                  "expected: %s, actual: %s",
1582                                  hr->hrs_index(), i,
1583                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1584         }
1585         failures += 1;
1586       }
1587     }
1588 
1589     if (failures > 0 && _verbose)  {
1590       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1591                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1592                              HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
1593                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1594     }
1595 
1596     _failures += failures;
1597 
1598     // We could stop iteration over the heap when we
1599     // find the first violating region by returning true.
1600     return false;
1601   }
1602 };
1603 
1604 
1605 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1606 protected:
1607   G1CollectedHeap* _g1h;
1608   ConcurrentMark* _cm;
1609   BitMap* _actual_region_bm;
1610   BitMap* _actual_card_bm;
1611 
1612   uint    _n_workers;
1613 
1614   BitMap* _expected_region_bm;
1615   BitMap* _expected_card_bm;
1616 
1617   int  _failures;
1618   bool _verbose;
1619 
1620 public:
1621   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1622                             BitMap* region_bm, BitMap* card_bm,
1623                             BitMap* expected_region_bm, BitMap* expected_card_bm)
1624     : AbstractGangTask("G1 verify final counting"),
1625       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1626       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1627       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1628       _failures(0), _verbose(false),
1629       _n_workers(0) {
1630     assert(VerifyDuringGC, "don't call this otherwise");
1631 
1632     // Use the value already set as the number of active threads
1633     // in the call to run_task().
1634     if (G1CollectedHeap::use_parallel_gc_threads()) {
1635       assert( _g1h->workers()->active_workers() > 0,
1636         "Should have been previously set");
1637       _n_workers = _g1h->workers()->active_workers();
1638     } else {
1639       _n_workers = 1;
1640     }
1641 
1642     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1643     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1644 
1645     _verbose = _cm->verbose_medium();
1646   }
1647 
1648   void work(uint worker_id) {
1649     assert(worker_id < _n_workers, "invariant");
1650 
1651     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1652                                             _actual_region_bm, _actual_card_bm,
1653                                             _expected_region_bm,
1654                                             _expected_card_bm,
1655                                             _verbose);
1656 
1657     if (G1CollectedHeap::use_parallel_gc_threads()) {
1658       _g1h->heap_region_par_iterate_chunked(&verify_cl,
1659                                             worker_id,
1660                                             _n_workers,
1661                                             HeapRegion::VerifyCountClaimValue);
1662     } else {
1663       _g1h->heap_region_iterate(&verify_cl);
1664     }
1665 
1666     Atomic::add(verify_cl.failures(), &_failures);
1667   }
1668 
1669   int failures() const { return _failures; }
1670 };
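
     // A minimal sketch of how this task is run (it mirrors the VerifyDuringGC
     // block in ConcurrentMark::cleanup() further down; the real code also
     // brackets the parallel run with set_par_threads()):
     //
     //   BitMap expected_region_bm(_region_bm.size(), false);
     //   BitMap expected_card_bm(_card_bm.size(), false);
     //   G1ParVerifyFinalCountTask verify_task(g1h, &_region_bm, &_card_bm,
     //                                         &expected_region_bm,
     //                                         &expected_card_bm);
     //   g1h->workers()->run_task(&verify_task);  // or verify_task.work(0) serially
     //   guarantee(verify_task.failures() == 0, "Unexpected accounting failures");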
1671 
1672 // Closure that finalizes the liveness counting data.
1673 // Used during the cleanup pause.
1674 // Sets the bits corresponding to the interval [NTAMS, top]
1675 // (which contains the implicitly live objects) in the
1676 // card liveness bitmap. Also sets the bit for each region,
1677 // containing live data, in the region liveness bitmap.
1678 
1679 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1680  public:
1681   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1682                               BitMap* region_bm,
1683                               BitMap* card_bm) :
1684     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1685 
1686   bool doHeapRegion(HeapRegion* hr) {
1687 
1688     if (hr->continuesHumongous()) {
1689       // We will ignore these here and process them when their
1690       // associated "starts humongous" region is processed (see
1691       // set_bit_for_region()). Note that we cannot rely on their
1692       // associated "starts humongous" region to have its bit set to
1693       // 1 since, due to the region chunking in the parallel region
1694       // iteration, a "continues humongous" region might be visited
1695       // before its associated "starts humongous" region.
1696       return false;
1697     }
1698 
1699     HeapWord* ntams = hr->next_top_at_mark_start();
1700     HeapWord* top   = hr->top();
1701 
1702     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1703 
1704     // Mark the allocated-since-marking portion...
1705     if (ntams < top) {
1706       // This definitely means the region has live objects.
1707       set_bit_for_region(hr);
1708 
1709       // Now set the bits in the card bitmap for [ntams, top)
1710       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1711       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1712 
1713       // Note: if we're looking at the last region in the heap, top
1714       // could actually be just beyond the end of the heap; end_idx
1715       // will then correspond to a (non-existent) card that is also
1716       // just beyond the heap.
1717       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1718         // top is not card aligned - increment to cover
1719         // all the cards spanned by the range [ntams, top)
1720         end_idx += 1;
1721       }
1722 
1723       assert(end_idx <= _card_bm->size(),
1724              err_msg("oob: end_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1725                      end_idx, _card_bm->size()));
1726       assert(start_idx < _card_bm->size(),
1727              err_msg("oob: start_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1728                      start_idx, _card_bm->size()));
1729 
1730       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1731     }
1732 
1733     // Set the bit for the region if it contains live data
1734     if (hr->next_marked_bytes() > 0) {
1735       set_bit_for_region(hr);
1736     }
1737 
1738     return false;
1739   }
1740 };
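
     // Unlike CalcLiveObjectsClosure above, this closure does not re-walk the
     // mark bitmap: it relies on the per-region next_marked_bytes() counts
     // accumulated during marking and only adds the implicitly live
     // [NTAMS, top) range to the card and region liveness bitmaps.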
1741 
1742 class G1ParFinalCountTask: public AbstractGangTask {
1743 protected:
1744   G1CollectedHeap* _g1h;
1745   ConcurrentMark* _cm;
1746   BitMap* _actual_region_bm;
1747   BitMap* _actual_card_bm;
1748 
1749   uint    _n_workers;
1750 
1751 public:
1752   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1753     : AbstractGangTask("G1 final counting"),
1754       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1755       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1756       _n_workers(0) {
1757     // Use the value already set as the number of active threads
1758     // in the call to run_task().
1759     if (G1CollectedHeap::use_parallel_gc_threads()) {
1760       assert( _g1h->workers()->active_workers() > 0,
1761         "Should have been previously set");
1762       _n_workers = _g1h->workers()->active_workers();
1763     } else {
1764       _n_workers = 1;
1765     }
1766   }
1767 
1768   void work(uint worker_id) {
1769     assert(worker_id < _n_workers, "invariant");
1770 
1771     FinalCountDataUpdateClosure final_update_cl(_g1h,
1772                                                 _actual_region_bm,
1773                                                 _actual_card_bm);
1774 
1775     if (G1CollectedHeap::use_parallel_gc_threads()) {
1776       _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1777                                             worker_id,
1778                                             _n_workers,
1779                                             HeapRegion::FinalCountClaimValue);
1780     } else {
1781       _g1h->heap_region_iterate(&final_update_cl);
1782     }
1783   }
1784 };
1785 
1786 class G1ParNoteEndTask;
1787 
1788 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1789   G1CollectedHeap* _g1;
1790   int _worker_num;
1791   size_t _max_live_bytes;
1792   uint _regions_claimed;
1793   size_t _freed_bytes;
1794   FreeRegionList* _local_cleanup_list;
1795   OldRegionSet* _old_proxy_set;
1796   HumongousRegionSet* _humongous_proxy_set;
1797   HRRSCleanupTask* _hrrs_cleanup_task;
1798   double _claimed_region_time;
1799   double _max_region_time;
1800 
1801 public:
1802   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1803                              int worker_num,
1804                              FreeRegionList* local_cleanup_list,
1805                              OldRegionSet* old_proxy_set,
1806                              HumongousRegionSet* humongous_proxy_set,
1807                              HRRSCleanupTask* hrrs_cleanup_task) :
1808     _g1(g1), _worker_num(worker_num),
1809     _max_live_bytes(0), _regions_claimed(0),
1810     _freed_bytes(0),
1811     _claimed_region_time(0.0), _max_region_time(0.0),
1812     _local_cleanup_list(local_cleanup_list),
1813     _old_proxy_set(old_proxy_set),
1814     _humongous_proxy_set(humongous_proxy_set),
1815     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1816 
1817   size_t freed_bytes() { return _freed_bytes; }
1818 
1819   bool doHeapRegion(HeapRegion *hr) {
1820     if (hr->continuesHumongous()) {
1821       return false;
1822     }
1823     // We use a claim value of zero here because all regions
1824     // were claimed with value 1 in the FinalCount task.
1825     _g1->reset_gc_time_stamps(hr);
1826     double start = os::elapsedTime();
1827     _regions_claimed++;
1828     hr->note_end_of_marking();
1829     _max_live_bytes += hr->max_live_bytes();
1830     _g1->free_region_if_empty(hr,
1831                               &_freed_bytes,
1832                               _local_cleanup_list,
1833                               _old_proxy_set,
1834                               _humongous_proxy_set,
1835                               _hrrs_cleanup_task,
1836                               true /* par */);
1837     double region_time = (os::elapsedTime() - start);
1838     _claimed_region_time += region_time;
1839     if (region_time > _max_region_time) {
1840       _max_region_time = region_time;
1841     }
1842     return false;
1843   }
1844 
1845   size_t max_live_bytes() { return _max_live_bytes; }
1846   uint regions_claimed() { return _regions_claimed; }
1847   double claimed_region_time_sec() { return _claimed_region_time; }
1848   double max_region_time_sec() { return _max_region_time; }
1849 };
1850 
1851 class G1ParNoteEndTask: public AbstractGangTask {
1852   friend class G1NoteEndOfConcMarkClosure;
1853 
1854 protected:
1855   G1CollectedHeap* _g1h;
1856   size_t _max_live_bytes;
1857   size_t _freed_bytes;
1858   FreeRegionList* _cleanup_list;
1859 
1860 public:
1861   G1ParNoteEndTask(G1CollectedHeap* g1h,
1862                    FreeRegionList* cleanup_list) :
1863     AbstractGangTask("G1 note end"), _g1h(g1h),
1864     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1865 
1866   void work(uint worker_id) {
1867     double start = os::elapsedTime();
1868     FreeRegionList local_cleanup_list("Local Cleanup List");
1869     OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
1870     HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
1871     HRRSCleanupTask hrrs_cleanup_task;
1872     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
1873                                            &old_proxy_set,
1874                                            &humongous_proxy_set,
1875                                            &hrrs_cleanup_task);
1876     if (G1CollectedHeap::use_parallel_gc_threads()) {
1877       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1878                                             _g1h->workers()->active_workers(),
1879                                             HeapRegion::NoteEndClaimValue);
1880     } else {
1881       _g1h->heap_region_iterate(&g1_note_end);
1882     }
1883     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1884 
1885     // Now update the lists
1886     _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
1887                                             NULL /* free_list */,
1888                                             &old_proxy_set,
1889                                             &humongous_proxy_set,
1890                                             true /* par */);
1891     {
1892       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1893       _max_live_bytes += g1_note_end.max_live_bytes();
1894       _freed_bytes += g1_note_end.freed_bytes();
1895 
1896       // If we iterate over the global cleanup list at the end of
1897       // cleanup to do this printing we will not guarantee to only
1898       // generate output for the newly-reclaimed regions (the list
1899       // might not be empty at the beginning of cleanup; we might
1900       // still be working on its previous contents). So we do the
1901       // printing here, before we append the new regions to the global
1902       // cleanup list.
1903 
1904       G1HRPrinter* hr_printer = _g1h->hr_printer();
1905       if (hr_printer->is_active()) {
1906         HeapRegionLinkedListIterator iter(&local_cleanup_list);
1907         while (iter.more_available()) {
1908           HeapRegion* hr = iter.get_next();
1909           hr_printer->cleanup(hr);
1910         }
1911       }
1912 
1913       _cleanup_list->add_as_tail(&local_cleanup_list);
1914       assert(local_cleanup_list.is_empty(), "post-condition");
1915 
1916       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1917     }
1918   }
1919   size_t max_live_bytes() { return _max_live_bytes; }
1920   size_t freed_bytes() { return _freed_bytes; }
1921 };
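
     // The regions freed by G1ParNoteEndTask are appended, per worker, to the
     // global _cleanup_list; they are cleared and handed over to the secondary
     // free list later, concurrently, in ConcurrentMark::completeCleanup()
     // below.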
1922 
1923 class G1ParScrubRemSetTask: public AbstractGangTask {
1924 protected:
1925   G1RemSet* _g1rs;
1926   BitMap* _region_bm;
1927   BitMap* _card_bm;
1928 public:
1929   G1ParScrubRemSetTask(G1CollectedHeap* g1h,
1930                        BitMap* region_bm, BitMap* card_bm) :
1931     AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
1932     _region_bm(region_bm), _card_bm(card_bm) { }
1933 
1934   void work(uint worker_id) {
1935     if (G1CollectedHeap::use_parallel_gc_threads()) {
1936       _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
1937                        HeapRegion::ScrubRemSetClaimValue);
1938     } else {
1939       _g1rs->scrub(_region_bm, _card_bm);
1940     }
1941   }
1942 
1943 };
1944 
1945 void ConcurrentMark::cleanup() {
1946   // world is stopped at this checkpoint
1947   assert(SafepointSynchronize::is_at_safepoint(),
1948          "world should be stopped");
1949   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1950 
1951   // If a full collection has happened, we shouldn't do this.
1952   if (has_aborted()) {
1953     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1954     return;
1955   }
1956 
1957   HRSPhaseSetter x(HRSPhaseCleanup);
1958   g1h->verify_region_sets_optional();
1959 
1960   if (VerifyDuringGC) {
1961     HandleMark hm;  // handle scope
1962     gclog_or_tty->print(" VerifyDuringGC:(before)");
1963     Universe::heap()->prepare_for_verify();
1964     Universe::verify(/* silent */ false,
1965                      /* option */ VerifyOption_G1UsePrevMarking);
1966   }
1967 
1968   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1969   g1p->record_concurrent_mark_cleanup_start();
1970 
1971   double start = os::elapsedTime();
1972 
1973   HeapRegionRemSet::reset_for_cleanup_tasks();
1974 
1975   uint n_workers;
1976 
1977   // Do counting once more with the world stopped for good measure.
1978   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1979 
1980   if (G1CollectedHeap::use_parallel_gc_threads()) {
1981     assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
1982            "sanity check");
1983 
1984     g1h->set_par_threads();
1985     n_workers = g1h->n_par_threads();
1986     assert(g1h->n_par_threads() == n_workers,
1987            "Should not have been reset");
1988     g1h->workers()->run_task(&g1_par_count_task);
1989     // Done with the parallel phase so reset to 0.
1990     g1h->set_par_threads(0);
1991 
1992     assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
1993            "sanity check");
1994   } else {
1995     n_workers = 1;
1996     g1_par_count_task.work(0);
1997   }
1998 
1999   if (VerifyDuringGC) {
2000     // Verify that the counting data accumulated during marking matches
2001     // that calculated by walking the marking bitmap.
2002 
2003     // Bitmaps to hold expected values
2004     BitMap expected_region_bm(_region_bm.size(), false);
2005     BitMap expected_card_bm(_card_bm.size(), false);
2006 
2007     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2008                                                  &_region_bm,
2009                                                  &_card_bm,
2010                                                  &expected_region_bm,
2011                                                  &expected_card_bm);
2012 
2013     if (G1CollectedHeap::use_parallel_gc_threads()) {
2014       g1h->set_par_threads((int)n_workers);
2015       g1h->workers()->run_task(&g1_par_verify_task);
2016       // Done with the parallel phase so reset to 0.
2017       g1h->set_par_threads(0);
2018 
2019       assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2020              "sanity check");
2021     } else {
2022       g1_par_verify_task.work(0);
2023     }
2024 
2025     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2026   }
2027 
2028   size_t start_used_bytes = g1h->used();
2029   g1h->set_marking_complete();
2030 
2031   double count_end = os::elapsedTime();
2032   double this_final_counting_time = (count_end - start);
2033   _total_counting_time += this_final_counting_time;
2034 
2035   if (G1PrintRegionLivenessInfo) {
2036     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2037     _g1h->heap_region_iterate(&cl);
2038   }
2039 
2040   // Install newly created mark bitMap as "prev".
2041   swapMarkBitMaps();
2042 
2043   g1h->reset_gc_time_stamp();
2044 
2045   // Note end of marking in all heap regions.
2046   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2047   if (G1CollectedHeap::use_parallel_gc_threads()) {
2048     g1h->set_par_threads((int)n_workers);
2049     g1h->workers()->run_task(&g1_par_note_end_task);
2050     g1h->set_par_threads(0);
2051 
2052     assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2053            "sanity check");
2054   } else {
2055     g1_par_note_end_task.work(0);
2056   }
2057   g1h->check_gc_time_stamps();
2058 
2059   if (!cleanup_list_is_empty()) {
2060     // The cleanup list is not empty, so we'll have to process it
2061     // concurrently. Notify anyone else that might be wanting free
2062     // regions that there will be more free regions coming soon.
2063     g1h->set_free_regions_coming();
2064   }
2065 
2066   // Scrub the rem sets before the record_concurrent_mark_cleanup_end()
2067   // call below, since it affects the metric by which we sort the heap regions.
2068   if (G1ScrubRemSets) {
2069     double rs_scrub_start = os::elapsedTime();
2070     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2071     if (G1CollectedHeap::use_parallel_gc_threads()) {
2072       g1h->set_par_threads((int)n_workers);
2073       g1h->workers()->run_task(&g1_par_scrub_rs_task);
2074       g1h->set_par_threads(0);
2075 
2076       assert(g1h->check_heap_region_claim_values(
2077                                             HeapRegion::ScrubRemSetClaimValue),
2078              "sanity check");
2079     } else {
2080       g1_par_scrub_rs_task.work(0);
2081     }
2082 
2083     double rs_scrub_end = os::elapsedTime();
2084     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2085     _total_rs_scrub_time += this_rs_scrub_time;
2086   }
2087 
2088   // this will also free any regions totally full of garbage objects,
2089   // and sort the regions.
2090   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2091 
2092   // Statistics.
2093   double end = os::elapsedTime();
2094   _cleanup_times.add((end - start) * 1000.0);
2095 
2096   if (G1Log::fine()) {
2097     g1h->print_size_transition(gclog_or_tty,
2098                                start_used_bytes,
2099                                g1h->used(),
2100                                g1h->capacity());
2101   }
2102 
2103   // Clean up will have freed any regions completely full of garbage.
2104   // Update the soft reference policy with the new heap occupancy.
2105   Universe::update_heap_info_at_gc();
2106 
2107   // We need to make this be a "collection" so any collection pause that
2108   // races with it goes around and waits for completeCleanup to finish.
2109   g1h->increment_total_collections();
2110 
2111   // We reclaimed old regions so we should calculate the sizes to make
2112   // sure we update the old gen/space data.
2113   g1h->g1mm()->update_sizes();
2114 
2115   if (VerifyDuringGC) {
2116     HandleMark hm;  // handle scope
2117     gclog_or_tty->print(" VerifyDuringGC:(after)");
2118     Universe::heap()->prepare_for_verify();
2119     Universe::verify(/* silent */ false,
2120                      /* option */ VerifyOption_G1UsePrevMarking);
2121   }
2122 
2123   g1h->verify_region_sets_optional();
2124 }
2125 
2126 void ConcurrentMark::completeCleanup() {
2127   if (has_aborted()) return;
2128 
2129   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2130 
2131   _cleanup_list.verify_optional();
2132   FreeRegionList tmp_free_list("Tmp Free List");
2133 
2134   if (G1ConcRegionFreeingVerbose) {
2135     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2136                            "cleanup list has %u entries",
2137                            _cleanup_list.length());
2138   }
2139 
2140   // No one else should be accessing the _cleanup_list at this point,
2141   // so it's not necessary to take any locks.
2142   while (!_cleanup_list.is_empty()) {
2143     HeapRegion* hr = _cleanup_list.remove_head();
2144     assert(hr != NULL, "the list was not empty");
2145     hr->par_clear();
2146     tmp_free_list.add_as_tail(hr);
2147 
2148     // Instead of adding one region at a time to the secondary_free_list,
2149     // we accumulate them in the local list and move them a few at a
2150     // time. This also cuts down on the number of notify_all() calls
2151     // we do during this process. We'll also append the local list when
2152     // _cleanup_list is empty (which means we just removed the last
2153     // region from the _cleanup_list).
2154     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2155         _cleanup_list.is_empty()) {
2156       if (G1ConcRegionFreeingVerbose) {
2157         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2158                                "appending %u entries to the secondary_free_list, "
2159                                "cleanup list still has %u entries",
2160                                tmp_free_list.length(),
2161                                _cleanup_list.length());
2162       }
2163 
2164       {
2165         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2166         g1h->secondary_free_list_add_as_tail(&tmp_free_list);
2167         SecondaryFreeList_lock->notify_all();
2168       }
2169 
2170       if (G1StressConcRegionFreeing) {
2171         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2172           os::sleep(Thread::current(), (jlong) 1, false);
2173         }
2174       }
2175     }
2176   }
2177   assert(tmp_free_list.is_empty(), "post-condition");
2178 }
2179 
2180 // Supporting Object and Oop closures for reference discovery
2181 // and processing during marking
2182 
2183 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2184   HeapWord* addr = (HeapWord*)obj;
2185   return addr != NULL &&
2186          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2187 }
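     // The predicate above treats an object as live if it lies outside the
     // G1 reserved heap, or if it is not "ill" with respect to the current
     // (next) marking information - roughly: it is either marked or was
     // allocated since its region's NTAMS.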
2188 
2189 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2190 // Uses the CMTask associated with a worker thread (for serial reference
2191 // processing the CMTask for worker 0 is used) to preserve (mark) and
2192 // trace referent objects.
2193 //
2194 // Using the CMTask and embedded local queues avoids having the worker
2195 // threads operating on the global mark stack. This reduces the risk
2196 // of overflowing the stack - which we would rather avoid at this late
2197 // stage. Also, using the tasks' local queues removes the potential
2198 // for the workers to interfere with each other, which could occur if
2199 // they operated on the global stack.
2200 
2201 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2202   ConcurrentMark*  _cm;
2203   CMTask*          _task;
2204   int              _ref_counter_limit;
2205   int              _ref_counter;
2206  public:
2207   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) :
2208     _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval) {
2209     assert(_ref_counter_limit > 0, "sanity");
2210     _ref_counter = _ref_counter_limit;
2211   }
2212 
2213   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2214   virtual void do_oop(      oop* p) { do_oop_work(p); }
2215 
2216   template <class T> void do_oop_work(T* p) {
2217     if (!_cm->has_overflown()) {
2218       oop obj = oopDesc::load_decode_heap_oop(p);
2219       if (_cm->verbose_high()) {
2220         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2221                                "*"PTR_FORMAT" = "PTR_FORMAT,
2222                                _task->worker_id(), p, (void*) obj);
2223       }
2224 
2225       _task->deal_with_reference(obj);
2226       _ref_counter--;
2227 
2228       if (_ref_counter == 0) {
2229         // We have dealt with _ref_counter_limit references, pushing them
2230         // and objects reachable from them on to the local stack (and
2231         // possibly the global stack). Call CMTask::do_marking_step() to
2232         // process these entries.
2233         //
2234         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2235         // there's nothing more to do (i.e. we're done with the entries that
2236         // were pushed as a result of the CMTask::deal_with_reference() calls
2237         // above) or we overflow.
2238         //
2239         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2240         // flag while there may still be some work to do. (See the comment at
2241         // the beginning of CMTask::do_marking_step() for those conditions -
2242         // one of which is reaching the specified time target.) It is only
2243         // when CMTask::do_marking_step() returns without setting the
2244         // has_aborted() flag that the marking step has completed.
2245         do {
2246           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2247           _task->do_marking_step(mark_step_duration_ms,
2248                                  false /* do_stealing    */,
2249                                  false /* do_termination */);
2250         } while (_task->has_aborted() && !_cm->has_overflown());
2251         _ref_counter = _ref_counter_limit;
2252       }
2253     } else {
2254       if (_cm->verbose_high()) {
2255          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2256       }
2257     }
2258   }
2259 };
2260 
2261 // 'Drain' oop closure used by both serial and parallel reference processing.
2262 // Uses the CMTask associated with a given worker thread (for serial
2263 // reference processing the CMTask for worker 0 is used). Calls the
2264 // do_marking_step routine, with an unbelievably large timeout value,
2265 // to drain the marking data structures of the remaining entries
2266 // added by the 'keep alive' oop closure above.
2267 
2268 class G1CMDrainMarkingStackClosure: public VoidClosure {
2269   ConcurrentMark* _cm;
2270   CMTask*         _task;
2271   bool            _do_stealing;
2272   bool            _do_termination;
2273  public:
2274   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_par) :
2275     _cm(cm), _task(task) {
2276     assert(is_par || _task->worker_id() == 0,
2277            "Only task for worker 0 should be used if ref processing is single threaded");
2278     // We only allow stealing and only enter the termination protocol
2279     // in CMTask::do_marking_step() if this closure is being instantiated
2280     // for parallel reference processing.
2281     _do_stealing = _do_termination = is_par;
2282   }
2283 
2284   void do_void() {
2285     do {
2286       if (_cm->verbose_high()) {
2287         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - "
2288                                "stealing: %s, termination: %s",
2289                                _task->worker_id(),
2290                                BOOL_TO_STR(_do_stealing),
2291                                BOOL_TO_STR(_do_termination));
2292       }
2293 
2294       // We call CMTask::do_marking_step() to completely drain the local
2295       // and global marking stacks of entries pushed by the 'keep alive'
2296       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2297       //
2298       // CMTask::do_marking_step() is called in a loop, which we'll exit
2299       // if there's nothing more to do (i.e. we've completely drained the
2300       // entries that were pushed as a result of applying the 'keep alive'
2301       // closure to the entries on the discovered ref lists) or we overflow
2302       // the global marking stack.
2303       //
2304       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2305       // flag while there may still be some work to do. (See the comment at
2306       // the beginning of CMTask::do_marking_step() for those conditions -
2307       // one of which is reaching the specified time target.) It is only
2308       // when CMTask::do_marking_step() returns without setting the
2309       // has_aborted() flag that the marking step has completed.
2310 
2311       _task->do_marking_step(1000000000.0 /* something very large */,
2312                              _do_stealing,
2313                              _do_termination);
2314     } while (_task->has_aborted() && !_cm->has_overflown());
2315   }
2316 };
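
     // Instances of this closure are created with is_par equal to
     // ReferenceProcessor::processing_is_mt() by G1CMRefProcTaskProxy below,
     // and with is_par = false for the serial path in
     // ConcurrentMark::weakRefsWork().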
2317 
2318 // Implementation of AbstractRefProcTaskExecutor for parallel
2319 // reference processing at the end of G1 concurrent marking
2320 
2321 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2322 private:
2323   G1CollectedHeap* _g1h;
2324   ConcurrentMark*  _cm;
2325   WorkGang*        _workers;
2326   int              _active_workers;
2327 
2328 public:
2329   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2330                         ConcurrentMark* cm,
2331                         WorkGang* workers,
2332                         int n_workers) :
2333     _g1h(g1h), _cm(cm),
2334     _workers(workers), _active_workers(n_workers) { }
2335 
2336   // Executes the given task using concurrent marking worker threads.
2337   virtual void execute(ProcessTask& task);
2338   virtual void execute(EnqueueTask& task);
2339 };
2340 
2341 class G1CMRefProcTaskProxy: public AbstractGangTask {
2342   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2343   ProcessTask&     _proc_task;
2344   G1CollectedHeap* _g1h;
2345   ConcurrentMark*  _cm;
2346   bool             _processing_is_mt;
2347 
2348 public:
2349   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2350                      G1CollectedHeap* g1h,
2351                      ConcurrentMark* cm) :
2352     AbstractGangTask("Process reference objects in parallel"),
2353     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2354       ReferenceProcessor* rp = _g1h->ref_processor_cm();
2355       _processing_is_mt = rp->processing_is_mt();
2356     }
2357 
2358   virtual void work(uint worker_id) {
2359     CMTask* marking_task = _cm->task(worker_id);
2360     G1CMIsAliveClosure g1_is_alive(_g1h);
2361     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
2362     G1CMDrainMarkingStackClosure g1_par_drain(_cm, marking_task, _processing_is_mt);
2363 
2364     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2365   }
2366 };
2367 
2368 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2369   assert(_workers != NULL, "Need parallel worker threads.");
2370   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2371 
2372   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2373 
2374   // We need to reset the phase for each task execution so that
2375   // the termination protocol of CMTask::do_marking_step works.
2376   _cm->set_phase(_active_workers, false /* concurrent */);
2377   _g1h->set_par_threads(_active_workers);
2378   _workers->run_task(&proc_task_proxy);
2379   _g1h->set_par_threads(0);
2380 }
2381 
2382 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2383   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2384   EnqueueTask& _enq_task;
2385 
2386 public:
2387   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2388     AbstractGangTask("Enqueue reference objects in parallel"),
2389     _enq_task(enq_task) { }
2390 
2391   virtual void work(uint worker_id) {
2392     _enq_task.work(worker_id);
2393   }
2394 };
2395 
2396 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2397   assert(_workers != NULL, "Need parallel worker threads.");
2398   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2399 
2400   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2401 
2402   _g1h->set_par_threads(_active_workers);
2403   _workers->run_task(&enq_task_proxy);
2404   _g1h->set_par_threads(0);
2405 }
2406 
2407 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2408   if (has_overflown()) {
2409     // If we have overflown the marking stack then just return
2410     // without processing the discovered references. We will be
2411     // restarting marking because of the overflow and any
2412     // currently discovered reference will stay discovered.
2413     // They will be processed when the remark task successfully
2414     // completes.
2415     return;
2416   }
2417 
2418   ResourceMark rm;
2419   HandleMark   hm;
2420 
2421   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2422 
2423   // Is alive closure.
2424   G1CMIsAliveClosure g1_is_alive(g1h);
2425 
2426   // Inner scope to exclude the cleaning of the string and symbol
2427   // tables from the displayed time.
2428   {
2429     if (G1Log::finer()) {
2430       gclog_or_tty->put(' ');
2431     }
2432     TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
2433 
2434     ReferenceProcessor* rp = g1h->ref_processor_cm();
2435 
2436     // See the comment in G1CollectedHeap::ref_processing_init()
2437     // about how reference processing currently works in G1.
2438 
2439     // Set the soft reference policy
2440     rp->setup_policy(clear_all_soft_refs);
2441     assert(_markStack.isEmpty(), "mark stack should be empty");
2442 
2443     // Non-MT instances 'Keep Alive' and 'Complete GC' oop closures.
2444     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0));
2445     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), false);
2446 
2447     // We need at least one active thread. If reference processing is
2448     // not multi-threaded we use the current (ConcurrentMarkThread) thread,
2449     // otherwise we use the work gang from the G1CollectedHeap and we
2450     // utilize all the worker threads we can.
2451     uint active_workers = (rp->processing_is_mt() && g1h->workers() != NULL
2452                                 ? g1h->workers()->active_workers()
2453                                 : 1U);
2454 
2455     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2456 
2457     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2458                                               g1h->workers(), active_workers);
2459 
2460     AbstractRefProcTaskExecutor* executor = (rp->processing_is_mt()
2461                                                 ? &par_task_executor
2462                                                 : NULL);
2463 
2464     // Set the degree of MT processing here.  If the discovery was done MT,
2465     // the number of threads involved during discovery could differ from
2466     // the number of active workers.  This is OK as long as the discovered
2467     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2468     rp->set_active_mt_degree(active_workers);
2469 
2470     // Process the weak references.
2471     rp->process_discovered_references(&g1_is_alive,
2472                                       &g1_keep_alive,
2473                                       &g1_drain_mark_stack,
2474                                       executor);
2475 
2476     // The do_oop work routines of the keep_alive and drain_marking_stack
2477     // oop closures will set the has_overflown flag if we overflow the
2478     // global marking stack.
2479 
2480     assert(_markStack.overflow() || _markStack.isEmpty(),
2481             "mark stack should be empty (unless it overflowed)");
2482     if (_markStack.overflow()) {
2483       // This should have been done already when we tried to push an
2484       // entry on to the global mark stack. But let's do it again.
2485       set_has_overflown();
2486     }
2487 
2488     assert(rp->num_q() == active_workers, "why not");
2489 
2490     rp->enqueue_discovered_references(executor);
2491 
2492     rp->verify_no_references_recorded();
2493     assert(!rp->discovery_enabled(), "Post condition");
2494   }
2495 
2496   // Now clean up stale oops in StringTable
2497   StringTable::unlink(&g1_is_alive);
2498   // Clean up unreferenced symbols in symbol table.
2499   SymbolTable::unlink();
2500 }
2501 
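     // swapMarkBitMaps() below is called from ConcurrentMark::cleanup(): the
     // bitmap just built by marking (the "next" bitmap) becomes the "prev"
     // bitmap, while the old "prev" bitmap is reused as the new "next" bitmap
     // (it is expected to be cleared before the next marking cycle starts).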
2502 void ConcurrentMark::swapMarkBitMaps() {
2503   CMBitMapRO* temp = _prevMarkBitMap;
2504   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2505   _nextMarkBitMap  = (CMBitMap*)  temp;
2506 }
2507 
2508 class CMRemarkTask: public AbstractGangTask {
2509 private:
2510   ConcurrentMark *_cm;
2511 
2512 public:
2513   void work(uint worker_id) {
2514     // Since all available tasks are actually started, we should
2515     // only proceed if we're supposed to be active.
2516     if (worker_id < _cm->active_tasks()) {
2517       CMTask* task = _cm->task(worker_id);
2518       task->record_start_time();
2519       do {
2520         task->do_marking_step(1000000000.0 /* something very large */,
2521                               true /* do_stealing    */,
2522                               true /* do_termination */);
2523       } while (task->has_aborted() && !_cm->has_overflown());
2524       // If we overflow, then we do not want to restart. We instead
2525       // want to abort remark and do concurrent marking again.
2526       task->record_end_time();
2527     }
2528   }
2529 
2530   CMRemarkTask(ConcurrentMark* cm, int active_workers) :
2531     AbstractGangTask("Par Remark"), _cm(cm) {
2532     _cm->terminator()->reset_for_reuse(active_workers);
2533   }
2534 };
2535 
2536 void ConcurrentMark::checkpointRootsFinalWork() {
2537   ResourceMark rm;
2538   HandleMark   hm;
2539   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2540 
2541   g1h->ensure_parsability(false);
2542 
2543   if (G1CollectedHeap::use_parallel_gc_threads()) {
2544     G1CollectedHeap::StrongRootsScope srs(g1h);
2545     // this is remark, so we'll use up all active threads
2546     uint active_workers = g1h->workers()->active_workers();
2547     if (active_workers == 0) {
2548       assert(active_workers > 0, "Should have been set earlier");
2549       active_workers = (uint) ParallelGCThreads;
2550       g1h->workers()->set_active_workers(active_workers);
2551     }
2552     set_phase(active_workers, false /* concurrent */);
2553     // Leave _parallel_marking_threads at its
2554     // value originally calculated in the ConcurrentMark
2555     // constructor and pass values of the active workers
2556     // through the gang in the task.
2557 
2558     CMRemarkTask remarkTask(this, active_workers);
2559     g1h->set_par_threads(active_workers);
2560     g1h->workers()->run_task(&remarkTask);
2561     g1h->set_par_threads(0);
2562   } else {
2563     G1CollectedHeap::StrongRootsScope srs(g1h);
2564     // this is remark, so we'll use up all available threads
2565     uint active_workers = 1;
2566     set_phase(active_workers, false /* concurrent */);
2567 
2568     CMRemarkTask remarkTask(this, active_workers);
2569     // We will start all available threads, even if we decide that the
2570     // active_workers will be fewer. The extra ones will just bail out
2571     // immediately.
2572     remarkTask.work(0);
2573   }
2574   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2575   guarantee(has_overflown() ||
2576             satb_mq_set.completed_buffers_num() == 0,
2577             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2578                     BOOL_TO_STR(has_overflown()),
2579                     satb_mq_set.completed_buffers_num()));
2580 
2581   print_stats();
2582 
2583 #if VERIFY_OBJS_PROCESSED
2584   if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) {
2585     gclog_or_tty->print_cr("Processed = %d, enqueued = %d.",
2586                            _scan_obj_cl.objs_processed,
2587                            ThreadLocalObjQueue::objs_enqueued);
2588     guarantee(_scan_obj_cl.objs_processed ==
2589               ThreadLocalObjQueue::objs_enqueued,
2590               "Different number of objs processed and enqueued.");
2591   }
2592 #endif
2593 }
2594 
2595 #ifndef PRODUCT
2596 
2597 class PrintReachableOopClosure: public OopClosure {
2598 private:
2599   G1CollectedHeap* _g1h;
2600   outputStream*    _out;
2601   VerifyOption     _vo;
2602   bool             _all;
2603 
2604 public:
2605   PrintReachableOopClosure(outputStream* out,
2606                            VerifyOption  vo,
2607                            bool          all) :
2608     _g1h(G1CollectedHeap::heap()),
2609     _out(out), _vo(vo), _all(all) { }
2610 
2611   void do_oop(narrowOop* p) { do_oop_work(p); }
2612   void do_oop(      oop* p) { do_oop_work(p); }
2613 
2614   template <class T> void do_oop_work(T* p) {
2615     oop         obj = oopDesc::load_decode_heap_oop(p);
2616     const char* str = NULL;
2617     const char* str2 = "";
2618 
2619     if (obj == NULL) {
2620       str = "";
2621     } else if (!_g1h->is_in_g1_reserved(obj)) {
2622       str = " O";
2623     } else {
2624       HeapRegion* hr  = _g1h->heap_region_containing(obj);
2625       guarantee(hr != NULL, "invariant");
2626       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2627       bool marked = _g1h->is_marked(obj, _vo);
2628 
2629       if (over_tams) {
2630         str = " >";
2631         if (marked) {
2632           str2 = " AND MARKED";
2633         }
2634       } else if (marked) {
2635         str = " M";
2636       } else {
2637         str = " NOT";
2638       }
2639     }
2640 
2641     _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
2642                    p, (void*) obj, str, str2);
2643   }
2644 };
2645 
2646 class PrintReachableObjectClosure : public ObjectClosure {
2647 private:
2648   G1CollectedHeap* _g1h;
2649   outputStream*    _out;
2650   VerifyOption     _vo;
2651   bool             _all;
2652   HeapRegion*      _hr;
2653 
2654 public:
2655   PrintReachableObjectClosure(outputStream* out,
2656                               VerifyOption  vo,
2657                               bool          all,
2658                               HeapRegion*   hr) :
2659     _g1h(G1CollectedHeap::heap()),
2660     _out(out), _vo(vo), _all(all), _hr(hr) { }
2661 
2662   void do_object(oop o) {
2663     bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2664     bool marked = _g1h->is_marked(o, _vo);
2665     bool print_it = _all || over_tams || marked;
2666 
2667     if (print_it) {
2668       _out->print_cr(" "PTR_FORMAT"%s",
2669                      o, (over_tams) ? " >" : (marked) ? " M" : "");
2670       PrintReachableOopClosure oopCl(_out, _vo, _all);
2671       o->oop_iterate_no_header(&oopCl);
2672     }
2673   }
2674 };
2675 
2676 class PrintReachableRegionClosure : public HeapRegionClosure {
2677 private:
2678   G1CollectedHeap* _g1h;
2679   outputStream*    _out;
2680   VerifyOption     _vo;
2681   bool             _all;
2682 
2683 public:
2684   bool doHeapRegion(HeapRegion* hr) {
2685     HeapWord* b = hr->bottom();
2686     HeapWord* e = hr->end();
2687     HeapWord* t = hr->top();
2688     HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2689     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2690                    "TAMS: "PTR_FORMAT, b, e, t, p);
2691     _out->cr();
2692 
2693     HeapWord* from = b;
2694     HeapWord* to   = t;
2695 
2696     if (to > from) {
2697       _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
2698       _out->cr();
2699       PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2700       hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2701       _out->cr();
2702     }
2703 
2704     return false;
2705   }
2706 
2707   PrintReachableRegionClosure(outputStream* out,
2708                               VerifyOption  vo,
2709                               bool          all) :
2710     _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2711 };
2712 
2713 void ConcurrentMark::print_reachable(const char* str,
2714                                      VerifyOption vo,
2715                                      bool all) {
2716   gclog_or_tty->cr();
2717   gclog_or_tty->print_cr("== Doing heap dump... ");
2718 
2719   if (G1PrintReachableBaseFile == NULL) {
2720     gclog_or_tty->print_cr("  #### error: no base file defined");
2721     return;
2722   }
2723 
2724   if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2725       (JVM_MAXPATHLEN - 1)) {
2726     gclog_or_tty->print_cr("  #### error: file name too long");
2727     return;
2728   }
2729 
2730   char file_name[JVM_MAXPATHLEN];
2731   sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2732   gclog_or_tty->print_cr("  dumping to file %s", file_name);
2733 
2734   fileStream fout(file_name);
2735   if (!fout.is_open()) {
2736     gclog_or_tty->print_cr("  #### error: could not open file");
2737     return;
2738   }
2739 
2740   outputStream* out = &fout;
2741   out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2742   out->cr();
2743 
2744   out->print_cr("--- ITERATING OVER REGIONS");
2745   out->cr();
2746   PrintReachableRegionClosure rcl(out, vo, all);
2747   _g1h->heap_region_iterate(&rcl);
2748   out->cr();
2749 
2750   gclog_or_tty->print_cr("  done");
2751   gclog_or_tty->flush();
2752 }
2753 
2754 #endif // PRODUCT
2755 
2756 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2757   // Note we are overriding the read-only view of the prev map here, via
2758   // the cast.
2759   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2760 }
2761 
2762 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2763   _nextMarkBitMap->clearRange(mr);
2764 }
2765 
2766 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
2767   clearRangePrevBitmap(mr);
2768   clearRangeNextBitmap(mr);
2769 }
2770 
2771 HeapRegion*
2772 ConcurrentMark::claim_region(uint worker_id) {
2773   // "checkpoint" the finger
2774   HeapWord* finger = _finger;
2775 
2776   // _heap_end will not change underneath our feet; it only changes at
2777   // yield points.
2778   while (finger < _heap_end) {
2779     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2780 
2781     // Note on how this code handles humongous regions. In the
2782     // normal case the finger will reach the start of a "starts
2783     // humongous" (SH) region. Its end will either be the end of the
2784     // last "continues humongous" (CH) region in the sequence, or the
2785     // standard end of the SH region (if the SH is the only region in
2786     // the sequence). That way claim_region() will skip over the CH
2787     // regions. However, there is a subtle race between a CM thread
2788     // executing this method and a mutator thread doing a humongous
2789     // object allocation. The two are not mutually exclusive as the CM
2790     // thread does not need to hold the Heap_lock when it gets
2791     // here. So there is a chance that claim_region() will come across
2792     // a free region that's in the process of becoming a SH or a CH
2793     // region. In the former case, it will either
2794     //   a) Miss the update to the region's end, in which case it will
2795     //      visit every subsequent CH region, will find their bitmaps
2796     //      empty, and do nothing, or
2797     //   b) Will observe the update of the region's end (in which case
2798     //      it will skip the subsequent CH regions).
2799     // If it comes across a region that suddenly becomes CH, the
2800     // scenario will be similar to b). So, the race between
2801     // claim_region() and a humongous object allocation might force us
2802     // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
2804     HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
2805     HeapWord*   bottom        = curr_region->bottom();
2806     HeapWord*   end           = curr_region->end();
2807     HeapWord*   limit         = curr_region->next_top_at_mark_start();
2808 
2809     if (verbose_low()) {
2810       gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2811                              "["PTR_FORMAT", "PTR_FORMAT"), "
2812                              "limit = "PTR_FORMAT,
2813                              worker_id, curr_region, bottom, end, limit);
2814     }
2815 
2816     // Is the gap between reading the finger and doing the CAS too long?
2817     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2818     if (res == finger) {
2819       // we succeeded
2820 
      // notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further
2823       assert(_finger >= end, "the finger should have moved forward");
2824 
2825       if (verbose_low()) {
2826         gclog_or_tty->print_cr("[%u] we were successful with region = "
2827                                PTR_FORMAT, worker_id, curr_region);
2828       }
2829 
2830       if (limit > bottom) {
2831         if (verbose_low()) {
2832           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2833                                  "returning it ", worker_id, curr_region);
2834         }
2835         return curr_region;
2836       } else {
2837         assert(limit == bottom,
2838                "the region limit should be at bottom");
2839         if (verbose_low()) {
2840           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2841                                  "returning NULL", worker_id, curr_region);
2842         }
2843         // we return NULL and the caller should try calling
2844         // claim_region() again.
2845         return NULL;
2846       }
2847     } else {
2848       assert(_finger > finger, "the finger should have moved forward");
2849       if (verbose_low()) {
2850         gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2851                                "global finger = "PTR_FORMAT", "
2852                                "our finger = "PTR_FORMAT,
2853                                worker_id, _finger, finger);
2854       }
2855 
2856       // read it again
2857       finger = _finger;
2858     }
2859   }
2860 
2861   return NULL;
2862 }
2863 
2864 #ifndef PRODUCT
2865 enum VerifyNoCSetOopsPhase {
2866   VerifyNoCSetOopsStack,
2867   VerifyNoCSetOopsQueues,
2868   VerifyNoCSetOopsSATBCompleted,
2869   VerifyNoCSetOopsSATBThread
2870 };
2871 
2872 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
2873 private:
2874   G1CollectedHeap* _g1h;
2875   VerifyNoCSetOopsPhase _phase;
2876   int _info;
2877 
2878   const char* phase_str() {
2879     switch (_phase) {
2880     case VerifyNoCSetOopsStack:         return "Stack";
2881     case VerifyNoCSetOopsQueues:        return "Queue";
2882     case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
2883     case VerifyNoCSetOopsSATBThread:    return "Thread SATB Buffers";
2884     default:                            ShouldNotReachHere();
2885     }
2886     return NULL;
2887   }
2888 
2889   void do_object_work(oop obj) {
2890     guarantee(!_g1h->obj_in_cs(obj),
2891               err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
2892                       (void*) obj, phase_str(), _info));
2893   }
2894 
2895 public:
2896   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2897 
2898   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
2899     _phase = phase;
2900     _info = info;
2901   }
2902 
2903   virtual void do_oop(oop* p) {
2904     oop obj = oopDesc::load_decode_heap_oop(p);
2905     do_object_work(obj);
2906   }
2907 
2908   virtual void do_oop(narrowOop* p) {
2909     // We should not come across narrow oops while scanning marking
2910     // stacks and SATB buffers.
2911     ShouldNotReachHere();
2912   }
2913 
2914   virtual void do_object(oop obj) {
2915     do_object_work(obj);
2916   }
2917 };
2918 
2919 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
2920                                          bool verify_enqueued_buffers,
2921                                          bool verify_thread_buffers,
2922                                          bool verify_fingers) {
2923   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2924   if (!G1CollectedHeap::heap()->mark_in_progress()) {
2925     return;
2926   }
2927 
2928   VerifyNoCSetOopsClosure cl;
2929 
2930   if (verify_stacks) {
2931     // Verify entries on the global mark stack
2932     cl.set_phase(VerifyNoCSetOopsStack);
2933     _markStack.oops_do(&cl);
2934 
2935     // Verify entries on the task queues
2936     for (uint i = 0; i < _max_worker_id; i += 1) {
2937       cl.set_phase(VerifyNoCSetOopsQueues, i);
2938       CMTaskQueue* queue = _task_queues->queue(i);
2939       queue->oops_do(&cl);
2940     }
2941   }
2942 
2943   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
2944 
2945   // Verify entries on the enqueued SATB buffers
2946   if (verify_enqueued_buffers) {
2947     cl.set_phase(VerifyNoCSetOopsSATBCompleted);
2948     satb_qs.iterate_completed_buffers_read_only(&cl);
2949   }
2950 
2951   // Verify entries on the per-thread SATB buffers
2952   if (verify_thread_buffers) {
2953     cl.set_phase(VerifyNoCSetOopsSATBThread);
2954     satb_qs.iterate_thread_buffers_read_only(&cl);
2955   }
2956 
2957   if (verify_fingers) {
2958     // Verify the global finger
2959     HeapWord* global_finger = finger();
2960     if (global_finger != NULL && global_finger < _heap_end) {
2961       // The global finger always points to a heap region boundary. We
2962       // use heap_region_containing_raw() to get the containing region
2963       // given that the global finger could be pointing to a free region
2964       // which subsequently becomes continues humongous. If that
2965       // happens, heap_region_containing() will return the bottom of the
2966       // corresponding starts humongous region and the check below will
2967       // not hold any more.
2968       HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2969       guarantee(global_finger == global_hr->bottom(),
2970                 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
2971                         global_finger, HR_FORMAT_PARAMS(global_hr)));
2972     }
2973 
2974     // Verify the task fingers
2975     assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2976     for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
2977       CMTask* task = _tasks[i];
2978       HeapWord* task_finger = task->finger();
2979       if (task_finger != NULL && task_finger < _heap_end) {
2980         // See above note on the global finger verification.
2981         HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2982         guarantee(task_finger == task_hr->bottom() ||
2983                   !task_hr->in_collection_set(),
2984                   err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
2985                           task_finger, HR_FORMAT_PARAMS(task_hr)));
2986       }
2987     }
2988   }
2989 }
2990 #endif // PRODUCT
2991 
2992 // Aggregate the counting data that was constructed concurrently
2993 // with marking.
2994 class AggregateCountDataHRClosure: public HeapRegionClosure {
2995   G1CollectedHeap* _g1h;
2996   ConcurrentMark* _cm;
2997   CardTableModRefBS* _ct_bs;
2998   BitMap* _cm_card_bm;
2999   uint _max_worker_id;
3000 
3001  public:
3002   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3003                               BitMap* cm_card_bm,
3004                               uint max_worker_id) :
3005     _g1h(g1h), _cm(g1h->concurrent_mark()),
3006     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3007     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3008 
3009   bool doHeapRegion(HeapRegion* hr) {
3010     if (hr->continuesHumongous()) {
3011       // We will ignore these here and process them when their
3012       // associated "starts humongous" region is processed.
      // Note that we cannot rely on their associated
      // "starts humongous" region to have its bit set to 1
3015       // since, due to the region chunking in the parallel region
3016       // iteration, a "continues humongous" region might be visited
3017       // before its associated "starts humongous".
3018       return false;
3019     }
3020 
3021     HeapWord* start = hr->bottom();
3022     HeapWord* limit = hr->next_top_at_mark_start();
3023     HeapWord* end = hr->end();
3024 
3025     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3026            err_msg("Preconditions not met - "
3027                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3028                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
3029                    start, limit, hr->top(), hr->end()));
3030 
3031     assert(hr->next_marked_bytes() == 0, "Precondition");
3032 
3033     if (start == limit) {
3034       // NTAMS of this region has not been set so nothing to do.
3035       return false;
3036     }
3037 
3038     // 'start' should be in the heap.
3039     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3041     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3042 
3043     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3044     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3045     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3046 
    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could actually be just beyond the end of the heap;
    // limit_idx will then correspond to a (non-existent) card
    // that is also outside the heap.
3054     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3055       limit_idx += 1;
3056     }
3057 
3058     assert(limit_idx <= end_idx, "or else use atomics");
3059 
3060     // Aggregate the "stripe" in the count data associated with hr.
3061     uint hrs_index = hr->hrs_index();
3062     size_t marked_bytes = 0;
3063 
3064     for (uint i = 0; i < _max_worker_id; i += 1) {
3065       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3066       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3067 
3068       // Fetch the marked_bytes in this region for task i and
3069       // add it to the running total for this region.
3070       marked_bytes += marked_bytes_array[hrs_index];
3071 
3072       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3073       // into the global card bitmap.
3074       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3075 
3076       while (scan_idx < limit_idx) {
3077         assert(task_card_bm->at(scan_idx) == true, "should be");
3078         _cm_card_bm->set_bit(scan_idx);
3079         assert(_cm_card_bm->at(scan_idx) == true, "should be");
3080 
3081         // BitMap::get_next_one_offset() can handle the case when
3082         // its left_offset parameter is greater than its right_offset
3083         // parameter. It does, however, have an early exit if
3084         // left_offset == right_offset. So let's limit the value
3085         // passed in for left offset here.
3086         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3087         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3088       }
3089     }
3090 
3091     // Update the marked bytes for this region.
3092     hr->add_to_marked_bytes(marked_bytes);
3093 
3094     // Next heap region
3095     return false;
3096   }
3097 };
3098 
3099 class G1AggregateCountDataTask: public AbstractGangTask {
3100 protected:
3101   G1CollectedHeap* _g1h;
3102   ConcurrentMark* _cm;
3103   BitMap* _cm_card_bm;
3104   uint _max_worker_id;
3105   int _active_workers;
3106 
3107 public:
3108   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3109                            ConcurrentMark* cm,
3110                            BitMap* cm_card_bm,
3111                            uint max_worker_id,
3112                            int n_workers) :
3113     AbstractGangTask("Count Aggregation"),
3114     _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3115     _max_worker_id(max_worker_id),
3116     _active_workers(n_workers) { }
3117 
3118   void work(uint worker_id) {
3119     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3120 
3121     if (G1CollectedHeap::use_parallel_gc_threads()) {
3122       _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
3123                                             _active_workers,
3124                                             HeapRegion::AggregateCountClaimValue);
3125     } else {
3126       _g1h->heap_region_iterate(&cl);
3127     }
3128   }
3129 };
3130 
3131 
3132 void ConcurrentMark::aggregate_count_data() {
3133   int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3134                         _g1h->workers()->active_workers() :
3135                         1);
3136 
3137   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3138                                            _max_worker_id, n_workers);
3139 
3140   if (G1CollectedHeap::use_parallel_gc_threads()) {
3141     assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3142            "sanity check");
3143     _g1h->set_par_threads(n_workers);
3144     _g1h->workers()->run_task(&g1_par_agg_task);
3145     _g1h->set_par_threads(0);
3146 
3147     assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
3148            "sanity check");
3149     _g1h->reset_heap_region_claim_values();
3150   } else {
3151     g1_par_agg_task.work(0);
3152   }
3153 }
3154 
3155 // Clear the per-worker arrays used to store the per-region counting data
3156 void ConcurrentMark::clear_all_count_data() {
3157   // Clear the global card bitmap - it will be filled during
3158   // liveness count aggregation (during remark) and the
3159   // final counting task.
3160   _card_bm.clear();
3161 
3162   // Clear the global region bitmap - it will be filled as part
3163   // of the final counting task.
3164   _region_bm.clear();
3165 
3166   uint max_regions = _g1h->max_regions();
3167   assert(_max_worker_id > 0, "uninitialized");
3168 
3169   for (uint i = 0; i < _max_worker_id; i += 1) {
3170     BitMap* task_card_bm = count_card_bitmap_for(i);
3171     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3172 
3173     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3174     assert(marked_bytes_array != NULL, "uninitialized");
3175 
3176     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3177     task_card_bm->clear();
3178   }
3179 }
3180 
3181 void ConcurrentMark::print_stats() {
3182   if (verbose_stats()) {
3183     gclog_or_tty->print_cr("---------------------------------------------------------------------");
3184     for (size_t i = 0; i < _active_tasks; ++i) {
3185       _tasks[i]->print_stats();
3186       gclog_or_tty->print_cr("---------------------------------------------------------------------");
3187     }
3188   }
3189 }
3190 
3191 // abandon current marking iteration due to a Full GC
3192 void ConcurrentMark::abort() {
3193   // Clear all marks to force marking thread to do nothing
3194   _nextMarkBitMap->clearAll();
3195   // Clear the liveness counting data
3196   clear_all_count_data();
3197   // Empty mark stack
3198   reset_marking_state();
3199   for (uint i = 0; i < _max_worker_id; ++i) {
3200     _tasks[i]->clear_region_fields();
3201   }
3202   _has_aborted = true;
3203 
3204   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3205   satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking; we'll read
  // the expected_active value from the SATB queue set.
3208   satb_mq_set.set_active_all_threads(
3209                                  false, /* new active value */
3210                                  satb_mq_set.is_active() /* expected_active */);
3211 }
3212 
3213 static void print_ms_time_info(const char* prefix, const char* name,
3214                                NumberSeq& ns) {
3215   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3216                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3217   if (ns.num() > 0) {
3218     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3219                            prefix, ns.sd(), ns.maximum());
3220   }
3221 }
3222 
3223 void ConcurrentMark::print_summary_info() {
3224   gclog_or_tty->print_cr(" Concurrent marking:");
3225   print_ms_time_info("  ", "init marks", _init_times);
3226   print_ms_time_info("  ", "remarks", _remark_times);
3227   {
3228     print_ms_time_info("     ", "final marks", _remark_mark_times);
3229     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3230 
3231   }
3232   print_ms_time_info("  ", "cleanups", _cleanup_times);
3233   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
3234                          _total_counting_time,
3235                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3236                           (double)_cleanup_times.num()
3237                          : 0.0));
3238   if (G1ScrubRemSets) {
3239     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3240                            _total_rs_scrub_time,
3241                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3242                             (double)_cleanup_times.num()
3243                            : 0.0));
3244   }
3245   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3246                          (_init_times.sum() + _remark_times.sum() +
3247                           _cleanup_times.sum())/1000.0);
3248   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3249                 "(%8.2f s marking).",
3250                 cmThread()->vtime_accum(),
3251                 cmThread()->vtime_mark_accum());
3252 }
3253 
3254 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3255   if (use_parallel_marking_threads()) {
3256     _parallel_workers->print_worker_threads_on(st);
3257   }
3258 }
3259 
3260 // We take a break if someone is trying to stop the world.
3261 bool ConcurrentMark::do_yield_check(uint worker_id) {
3262   if (should_yield()) {
3263     if (worker_id == 0) {
3264       _g1h->g1_policy()->record_concurrent_pause();
3265     }
3266     cmThread()->yield();
3267     return true;
3268   } else {
3269     return false;
3270   }
3271 }
3272 
3273 bool ConcurrentMark::should_yield() {
3274   return cmThread()->should_yield();
3275 }
3276 
3277 bool ConcurrentMark::containing_card_is_marked(void* p) {
3278   size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
3279   return _card_bm.at(offset >> CardTableModRefBS::card_shift);
3280 }
3281 
3282 bool ConcurrentMark::containing_cards_are_marked(void* start,
3283                                                  void* last) {
3284   return containing_card_is_marked(start) &&
3285          containing_card_is_marked(last);
3286 }
3287 
3288 #ifndef PRODUCT
3289 // for debugging purposes
3290 void ConcurrentMark::print_finger() {
3291   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3292                          _heap_start, _heap_end, _finger);
3293   for (uint i = 0; i < _max_worker_id; ++i) {
3294     gclog_or_tty->print("   %u: "PTR_FORMAT, i, _tasks[i]->finger());
3295   }
3296   gclog_or_tty->print_cr("");
3297 }
3298 #endif
3299 
3300 void CMTask::scan_object(oop obj) {
3301   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3302 
3303   if (_cm->verbose_high()) {
3304     gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3305                            _worker_id, (void*) obj);
3306   }
3307 
3308   size_t obj_size = obj->size();
3309   _words_scanned += obj_size;
3310 
3311   obj->oop_iterate(_cm_oop_closure);
3312   statsOnly( ++_objs_scanned );
3313   check_limits();
3314 }
3315 
3316 // Closure for iteration over bitmaps
3317 class CMBitMapClosure : public BitMapClosure {
3318 private:
3319   // the bitmap that is being iterated over
3320   CMBitMap*                   _nextMarkBitMap;
3321   ConcurrentMark*             _cm;
3322   CMTask*                     _task;
3323 
3324 public:
3325   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3326     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
3327 
3328   bool do_bit(size_t offset) {
3329     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3330     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3331     assert( addr < _cm->finger(), "invariant");
3332 
3333     statsOnly( _task->increase_objs_found_on_bitmap() );
3334     assert(addr >= _task->finger(), "invariant");
3335 
    // We move this task's local finger along.
3337     _task->move_finger_to(addr);
3338 
3339     _task->scan_object(oop(addr));
3340     // we only partially drain the local queue and global stack
3341     _task->drain_local_queue(true);
3342     _task->drain_global_stack(true);
3343 
3344     // if the has_aborted flag has been raised, we need to bail out of
3345     // the iteration
3346     return !_task->has_aborted();
3347   }
3348 };
3349 
3350 // Closure for iterating over objects, currently only used for
3351 // processing SATB buffers.
3352 class CMObjectClosure : public ObjectClosure {
3353 private:
3354   CMTask* _task;
3355 
3356 public:
3357   void do_object(oop obj) {
3358     _task->deal_with_reference(obj);
3359   }
3360 
3361   CMObjectClosure(CMTask* task) : _task(task) { }
3362 };
3363 
3364 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3365                                ConcurrentMark* cm,
3366                                CMTask* task)
3367   : _g1h(g1h), _cm(cm), _task(task) {
3368   assert(_ref_processor == NULL, "should be initialized to NULL");
3369 
3370   if (G1UseConcMarkReferenceProcessing) {
3371     _ref_processor = g1h->ref_processor_cm();
3372     assert(_ref_processor != NULL, "should not be NULL");
3373   }
3374 }
3375 
3376 void CMTask::setup_for_region(HeapRegion* hr) {
3377   // Separated the asserts so that we know which one fires.
3378   assert(hr != NULL,
3379         "claim_region() should have filtered out continues humongous regions");
3380   assert(!hr->continuesHumongous(),
3381         "claim_region() should have filtered out continues humongous regions");
3382 
3383   if (_cm->verbose_low()) {
3384     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3385                            _worker_id, hr);
3386   }
3387 
3388   _curr_region  = hr;
3389   _finger       = hr->bottom();
3390   update_region_limit();
3391 }
3392 
3393 void CMTask::update_region_limit() {
3394   HeapRegion* hr            = _curr_region;
3395   HeapWord* bottom          = hr->bottom();
3396   HeapWord* limit           = hr->next_top_at_mark_start();
3397 
3398   if (limit == bottom) {
3399     if (_cm->verbose_low()) {
3400       gclog_or_tty->print_cr("[%u] found an empty region "
3401                              "["PTR_FORMAT", "PTR_FORMAT")",
3402                              _worker_id, bottom, limit);
3403     }
3404     // The region was collected underneath our feet.
3405     // We set the finger to bottom to ensure that the bitmap
3406     // iteration that will follow this will not do anything.
3407     // (this is not a condition that holds when we set the region up,
3408     // as the region is not supposed to be empty in the first place)
3409     _finger = bottom;
3410   } else if (limit >= _region_limit) {
3411     assert(limit >= _finger, "peace of mind");
3412   } else {
3413     assert(limit < _region_limit, "only way to get here");
3414     // This can happen under some pretty unusual circumstances.  An
3415     // evacuation pause empties the region underneath our feet (NTAMS
3416     // at bottom). We then do some allocation in the region (NTAMS
3417     // stays at bottom), followed by the region being used as a GC
3418     // alloc region (NTAMS will move to top() and the objects
3419     // originally below it will be grayed). All objects now marked in
3420     // the region are explicitly grayed, if below the global finger,
3421     // and we do not need in fact to scan anything else. So, we simply
3422     // set _finger to be limit to ensure that the bitmap iteration
3423     // doesn't do anything.
3424     _finger = limit;
3425   }
3426 
3427   _region_limit = limit;
3428 }
3429 
3430 void CMTask::giveup_current_region() {
3431   assert(_curr_region != NULL, "invariant");
3432   if (_cm->verbose_low()) {
3433     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3434                            _worker_id, _curr_region);
3435   }
3436   clear_region_fields();
3437 }
3438 
3439 void CMTask::clear_region_fields() {
3440   // Values for these three fields that indicate that we're not
3441   // holding on to a region.
3442   _curr_region   = NULL;
3443   _finger        = NULL;
3444   _region_limit  = NULL;
3445 }
3446 
3447 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3448   if (cm_oop_closure == NULL) {
3449     assert(_cm_oop_closure != NULL, "invariant");
3450   } else {
3451     assert(_cm_oop_closure == NULL, "invariant");
3452   }
3453   _cm_oop_closure = cm_oop_closure;
3454 }
3455 
3456 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3457   guarantee(nextMarkBitMap != NULL, "invariant");
3458 
3459   if (_cm->verbose_low()) {
3460     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3461   }
3462 
3463   _nextMarkBitMap                = nextMarkBitMap;
3464   clear_region_fields();
3465 
3466   _calls                         = 0;
3467   _elapsed_time_ms               = 0.0;
3468   _termination_time_ms           = 0.0;
3469   _termination_start_time_ms     = 0.0;
3470 
3471 #if _MARKING_STATS_
3472   _local_pushes                  = 0;
3473   _local_pops                    = 0;
3474   _local_max_size                = 0;
3475   _objs_scanned                  = 0;
3476   _global_pushes                 = 0;
3477   _global_pops                   = 0;
3478   _global_max_size               = 0;
3479   _global_transfers_to           = 0;
3480   _global_transfers_from         = 0;
3481   _regions_claimed               = 0;
3482   _objs_found_on_bitmap          = 0;
3483   _satb_buffers_processed        = 0;
3484   _steal_attempts                = 0;
3485   _steals                        = 0;
3486   _aborted                       = 0;
3487   _aborted_overflow              = 0;
3488   _aborted_cm_aborted            = 0;
3489   _aborted_yield                 = 0;
3490   _aborted_timed_out             = 0;
3491   _aborted_satb                  = 0;
3492   _aborted_termination           = 0;
3493 #endif // _MARKING_STATS_
3494 }
3495 
3496 bool CMTask::should_exit_termination() {
3497   regular_clock_call();
3498   // This is called when we are in the termination protocol. We should
3499   // quit if, for some reason, this task wants to abort or the global
3500   // stack is not empty (this means that we can get work from it).
3501   return !_cm->mark_stack_empty() || has_aborted();
3502 }
3503 
3504 void CMTask::reached_limit() {
3505   assert(_words_scanned >= _words_scanned_limit ||
3506          _refs_reached >= _refs_reached_limit ,
3507          "shouldn't have been called otherwise");
3508   regular_clock_call();
3509 }
3510 
3511 void CMTask::regular_clock_call() {
3512   if (has_aborted()) return;
3513 
3514   // First, we need to recalculate the words scanned and refs reached
3515   // limits for the next clock call.
3516   recalculate_limits();
3517 
3518   // During the regular clock call we do the following
3519 
3520   // (1) If an overflow has been flagged, then we abort.
3521   if (_cm->has_overflown()) {
3522     set_has_aborted();
3523     return;
3524   }
3525 
3526   // If we are not concurrent (i.e. we're doing remark) we don't need
3527   // to check anything else. The other steps are only needed during
3528   // the concurrent marking phase.
3529   if (!concurrent()) return;
3530 
3531   // (2) If marking has been aborted for Full GC, then we also abort.
3532   if (_cm->has_aborted()) {
3533     set_has_aborted();
3534     statsOnly( ++_aborted_cm_aborted );
3535     return;
3536   }
3537 
3538   double curr_time_ms = os::elapsedVTime() * 1000.0;
3539 
3540   // (3) If marking stats are enabled, then we update the step history.
3541 #if _MARKING_STATS_
3542   if (_words_scanned >= _words_scanned_limit) {
3543     ++_clock_due_to_scanning;
3544   }
3545   if (_refs_reached >= _refs_reached_limit) {
3546     ++_clock_due_to_marking;
3547   }
3548 
3549   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3550   _interval_start_time_ms = curr_time_ms;
3551   _all_clock_intervals_ms.add(last_interval_ms);
3552 
3553   if (_cm->verbose_medium()) {
3554       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3555                         "scanned = %d%s, refs reached = %d%s",
3556                         _worker_id, last_interval_ms,
3557                         _words_scanned,
3558                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3559                         _refs_reached,
3560                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3561   }
3562 #endif // _MARKING_STATS_
3563 
3564   // (4) We check whether we should yield. If we have to, then we abort.
3565   if (_cm->should_yield()) {
3566     // We should yield. To do this we abort the task. The caller is
3567     // responsible for yielding.
3568     set_has_aborted();
3569     statsOnly( ++_aborted_yield );
3570     return;
3571   }
3572 
3573   // (5) We check whether we've reached our time quota. If we have,
3574   // then we abort.
3575   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3576   if (elapsed_time_ms > _time_target_ms) {
3577     set_has_aborted();
3578     _has_timed_out = true;
3579     statsOnly( ++_aborted_timed_out );
3580     return;
3581   }
3582 
  // (6) Finally, we check whether there are enough completed SATB
3584   // buffers available for processing. If there are, we abort.
3585   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3586   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3587     if (_cm->verbose_low()) {
3588       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3589                              _worker_id);
3590     }
    // We do need to process SATB buffers, so we'll abort and restart
    // the marking task to do so.
3593     set_has_aborted();
3594     statsOnly( ++_aborted_satb );
3595     return;
3596   }
3597 }
3598 
3599 void CMTask::recalculate_limits() {
3600   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3601   _words_scanned_limit      = _real_words_scanned_limit;
3602 
3603   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
3604   _refs_reached_limit       = _real_refs_reached_limit;
3605 }
3606 
3607 void CMTask::decrease_limits() {
3608   // This is called when we believe that we're going to do an infrequent
3609   // operation which will increase the per byte scanned cost (i.e. move
3610   // entries to/from the global stack). It basically tries to decrease the
3611   // scanning limit so that the clock is called earlier.
3612 
3613   if (_cm->verbose_medium()) {
3614     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3615   }
3616 
3617   _words_scanned_limit = _real_words_scanned_limit -
3618     3 * words_scanned_period / 4;
3619   _refs_reached_limit  = _real_refs_reached_limit -
3620     3 * refs_reached_period / 4;
3621 }
3622 
3623 void CMTask::move_entries_to_global_stack() {
3624   // local array where we'll store the entries that will be popped
3625   // from the local queue
3626   oop buffer[global_stack_transfer_size];
3627 
3628   int n = 0;
3629   oop obj;
3630   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3631     buffer[n] = obj;
3632     ++n;
3633   }
3634 
3635   if (n > 0) {
3636     // we popped at least one entry from the local queue
3637 
3638     statsOnly( ++_global_transfers_to; _local_pops += n );
3639 
3640     if (!_cm->mark_stack_push(buffer, n)) {
3641       if (_cm->verbose_low()) {
3642         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3643                                _worker_id);
3644       }
3645       set_has_aborted();
3646     } else {
3647       // the transfer was successful
3648 
3649       if (_cm->verbose_medium()) {
3650         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3651                                _worker_id, n);
3652       }
3653       statsOnly( int tmp_size = _cm->mark_stack_size();
3654                  if (tmp_size > _global_max_size) {
3655                    _global_max_size = tmp_size;
3656                  }
3657                  _global_pushes += n );
3658     }
3659   }
3660 
3661   // this operation was quite expensive, so decrease the limits
3662   decrease_limits();
3663 }
3664 
3665 void CMTask::get_entries_from_global_stack() {
3666   // local array where we'll store the entries that will be popped
3667   // from the global stack.
3668   oop buffer[global_stack_transfer_size];
3669   int n;
3670   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3671   assert(n <= global_stack_transfer_size,
3672          "we should not pop more than the given limit");
3673   if (n > 0) {
3674     // yes, we did actually pop at least one entry
3675 
3676     statsOnly( ++_global_transfers_from; _global_pops += n );
3677     if (_cm->verbose_medium()) {
3678       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3679                              _worker_id, n);
3680     }
3681     for (int i = 0; i < n; ++i) {
3682       bool success = _task_queue->push(buffer[i]);
3683       // We only call this when the local queue is empty or under a
3684       // given target limit. So, we do not expect this push to fail.
3685       assert(success, "invariant");
3686     }
3687 
3688     statsOnly( int tmp_size = _task_queue->size();
3689                if (tmp_size > _local_max_size) {
3690                  _local_max_size = tmp_size;
3691                }
3692                _local_pushes += n );
3693   }
3694 
3695   // this operation was quite expensive, so decrease the limits
3696   decrease_limits();
3697 }
3698 
3699 void CMTask::drain_local_queue(bool partially) {
3700   if (has_aborted()) return;
3701 
  // Decide what the target size is, depending on whether we're going to
3703   // drain it partially (so that other tasks can steal if they run out
3704   // of things to do) or totally (at the very end).
3705   size_t target_size;
3706   if (partially) {
3707     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3708   } else {
3709     target_size = 0;
3710   }
3711 
3712   if (_task_queue->size() > target_size) {
3713     if (_cm->verbose_high()) {
3714       gclog_or_tty->print_cr("[%u] draining local queue, target size = %d",
3715                              _worker_id, target_size);
3716     }
3717 
3718     oop obj;
3719     bool ret = _task_queue->pop_local(obj);
3720     while (ret) {
3721       statsOnly( ++_local_pops );
3722 
3723       if (_cm->verbose_high()) {
3724         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3725                                (void*) obj);
3726       }
3727 
3728       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3729       assert(!_g1h->is_on_master_free_list(
3730                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3731 
3732       scan_object(obj);
3733 
3734       if (_task_queue->size() <= target_size || has_aborted()) {
3735         ret = false;
3736       } else {
3737         ret = _task_queue->pop_local(obj);
3738       }
3739     }
3740 
3741     if (_cm->verbose_high()) {
3742       gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3743                              _worker_id, _task_queue->size());
3744     }
3745   }
3746 }
3747 
3748 void CMTask::drain_global_stack(bool partially) {
3749   if (has_aborted()) return;
3750 
3751   // We have a policy to drain the local queue before we attempt to
3752   // drain the global stack.
3753   assert(partially || _task_queue->size() == 0, "invariant");
3754 
  // Decide what the target size is, depending on whether we're going to
3756   // drain it partially (so that other tasks can steal if they run out
3757   // of things to do) or totally (at the very end).  Notice that,
3758   // because we move entries from the global stack in chunks or
3759   // because another task might be doing the same, we might in fact
3760   // drop below the target. But, this is not a problem.
3761   size_t target_size;
3762   if (partially) {
3763     target_size = _cm->partial_mark_stack_size_target();
3764   } else {
3765     target_size = 0;
3766   }
3767 
3768   if (_cm->mark_stack_size() > target_size) {
3769     if (_cm->verbose_low()) {
3770       gclog_or_tty->print_cr("[%u] draining global_stack, target size %d",
3771                              _worker_id, target_size);
3772     }
3773 
3774     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3775       get_entries_from_global_stack();
3776       drain_local_queue(partially);
3777     }
3778 
3779     if (_cm->verbose_low()) {
3780       gclog_or_tty->print_cr("[%u] drained global stack, size = %d",
3781                              _worker_id, _cm->mark_stack_size());
3782     }
3783   }
3784 }
3785 
3786 // SATB Queue has several assumptions on whether to call the par or
// non-par versions of the methods. This is why some of the code is
3788 // replicated. We should really get rid of the single-threaded version
3789 // of the code to simplify things.
3790 void CMTask::drain_satb_buffers() {
3791   if (has_aborted()) return;
3792 
3793   // We set this so that the regular clock knows that we're in the
3794   // middle of draining buffers and doesn't set the abort flag when it
3795   // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
3797   _draining_satb_buffers = true;
3798 
3799   CMObjectClosure oc(this);
3800   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3801   if (G1CollectedHeap::use_parallel_gc_threads()) {
3802     satb_mq_set.set_par_closure(_worker_id, &oc);
3803   } else {
3804     satb_mq_set.set_closure(&oc);
3805   }
3806 
3807   // This keeps claiming and applying the closure to completed buffers
3808   // until we run out of buffers or we need to abort.
3809   if (G1CollectedHeap::use_parallel_gc_threads()) {
3810     while (!has_aborted() &&
3811            satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
3812       if (_cm->verbose_medium()) {
3813         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3814       }
3815       statsOnly( ++_satb_buffers_processed );
3816       regular_clock_call();
3817     }
3818   } else {
3819     while (!has_aborted() &&
3820            satb_mq_set.apply_closure_to_completed_buffer()) {
3821       if (_cm->verbose_medium()) {
3822         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3823       }
3824       statsOnly( ++_satb_buffers_processed );
3825       regular_clock_call();
3826     }
3827   }
3828 
3829   if (!concurrent() && !has_aborted()) {
3830     // We should only do this during remark.
3831     if (G1CollectedHeap::use_parallel_gc_threads()) {
3832       satb_mq_set.par_iterate_closure_all_threads(_worker_id);
3833     } else {
3834       satb_mq_set.iterate_closure_all_threads();
3835     }
3836   }
3837 
3838   _draining_satb_buffers = false;
3839 
3840   assert(has_aborted() ||
3841          concurrent() ||
3842          satb_mq_set.completed_buffers_num() == 0, "invariant");
3843 
3844   if (G1CollectedHeap::use_parallel_gc_threads()) {
3845     satb_mq_set.set_par_closure(_worker_id, NULL);
3846   } else {
3847     satb_mq_set.set_closure(NULL);
3848   }
3849 
  // Again, this was a potentially expensive operation, so decrease the
  // limits to get the regular clock call early.
3852   decrease_limits();
3853 }
3854 
3855 void CMTask::print_stats() {
3856   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3857                          _worker_id, _calls);
3858   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3859                          _elapsed_time_ms, _termination_time_ms);
3860   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3861                          _step_times_ms.num(), _step_times_ms.avg(),
3862                          _step_times_ms.sd());
3863   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3864                          _step_times_ms.maximum(), _step_times_ms.sum());
3865 
3866 #if _MARKING_STATS_
3867   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3868                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3869                          _all_clock_intervals_ms.sd());
3870   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
3871                          _all_clock_intervals_ms.maximum(),
3872                          _all_clock_intervals_ms.sum());
3873   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = %d, marking = %d",
3874                          _clock_due_to_scanning, _clock_due_to_marking);
3875   gclog_or_tty->print_cr("  Objects: scanned = %d, found on the bitmap = %d",
3876                          _objs_scanned, _objs_found_on_bitmap);
3877   gclog_or_tty->print_cr("  Local Queue:  pushes = %d, pops = %d, max size = %d",
3878                          _local_pushes, _local_pops, _local_max_size);
3879   gclog_or_tty->print_cr("  Global Stack: pushes = %d, pops = %d, max size = %d",
3880                          _global_pushes, _global_pops, _global_max_size);
3881   gclog_or_tty->print_cr("                transfers to = %d, transfers from = %d",
3882                          _global_transfers_to,_global_transfers_from);
3883   gclog_or_tty->print_cr("  Regions: claimed = %d", _regions_claimed);
3884   gclog_or_tty->print_cr("  SATB buffers: processed = %d", _satb_buffers_processed);
3885   gclog_or_tty->print_cr("  Steals: attempts = %d, successes = %d",
3886                          _steal_attempts, _steals);
3887   gclog_or_tty->print_cr("  Aborted: %d, due to", _aborted);
3888   gclog_or_tty->print_cr("    overflow: %d, global abort: %d, yield: %d",
3889                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3890   gclog_or_tty->print_cr("    time out: %d, SATB: %d, termination: %d",
3891                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3892 #endif // _MARKING_STATS_
3893 }
3894 
3895 /*****************************************************************************
3896 
3897     The do_marking_step(time_target_ms) method is the building block
3898     of the parallel marking framework. It can be called in parallel
3899     with other invocations of do_marking_step() on different tasks
3900     (but only one per task, obviously) and concurrently with the
3901     mutator threads, or during remark, hence it eliminates the need
3902     for two versions of the code. When called during remark, it will
3903     pick up from where the task left off during the concurrent marking
3904     phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
3906     it needs to yield.
3907 
    The data structures that it uses to do marking work are the
3909     following:
3910 
3911       (1) Marking Bitmap. If there are gray objects that appear only
3912       on the bitmap (this happens either when dealing with an overflow
3913       or when the initial marking phase has simply marked the roots
3914       and didn't push them on the stack), then tasks claim heap
3915       regions whose bitmap they then scan to find gray objects. A
3916       global finger indicates where the end of the last claimed region
3917       is. A local finger indicates how far into the region a task has
3918       scanned. The two fingers are used to determine how to gray an
3919       object (i.e. whether simply marking it is OK, as it will be
3920       visited by a task in the future, or whether it needs to be also
3921       pushed on a stack).
3922 
3923       (2) Local Queue. The local queue of the task which is accessed
3924       reasonably efficiently by the task. Other tasks can steal from
3925       it when they run out of work. Throughout the marking phase, a
3926       task attempts to keep its local queue short but not totally
3927       empty, so that entries are available for stealing by other
      tasks. Only when there is no more work will a task totally
      drain its local queue.
3930 
3931       (3) Global Mark Stack. This handles local queue overflow. During
3932       marking only sets of entries are moved between it and the local
3933       queues, as access to it requires a mutex and more fine-grain
3934       interaction with it which might cause contention. If it
3935       overflows, then the marking phase should restart and iterate
3936       over the bitmap to identify gray objects. Throughout the marking
3937       phase, tasks attempt to keep the global mark stack at a small
3938       length but not totally empty, so that entries are available for
      popping by other tasks. Only when there is no more work will
      tasks totally drain the global mark stack.
3941 
3942       (4) SATB Buffer Queue. This is where completed SATB buffers are
3943       made available. Buffers are regularly removed from this queue
3944       and scanned for roots, so that the queue doesn't get too
3945       long. During remark, all completed buffers are processed, as
      well as the filled-in parts of any uncompleted buffers.
3947 
3948     The do_marking_step() method tries to abort when the time target
3949     has been reached. There are a few other cases when the
3950     do_marking_step() method also aborts:
3951 
3952       (1) When the marking phase has been aborted (after a Full GC).
3953 
3954       (2) When a global overflow (on the global stack) has been
3955       triggered. Before the task aborts, it will actually sync up with
3956       the other tasks to ensure that all the marking data structures
3957       (local queues, stacks, fingers etc.)  are re-initialised so that
3958       when do_marking_step() completes, the marking phase can
3959       immediately restart.
3960 
3961       (3) When enough completed SATB buffers are available. The
3962       do_marking_step() method only tries to drain SATB buffers right
3963       at the beginning. So, if enough buffers are available, the
3964       marking step aborts and the SATB buffers are processed at
3965       the beginning of the next invocation.
3966 
      (4) To yield. When we have to yield, we abort and yield
      right at the end of do_marking_step(). This saves us a lot
      of hassle as, by yielding, we might allow a Full GC. If this
      happens, objects will be compacted underneath our feet, the
      heap might shrink, etc. We avoid checking for this by just
      aborting and doing the yield right at the end.
3973 
3974     From the above it follows that the do_marking_step() method should
3975     be called in a loop (or, otherwise, regularly) until it completes.
3976 
3977     If a marking step completes without its has_aborted() flag being
3978     true, it means it has completed the current marking phase (and
3979     also all other marking tasks have done so and have all synced up).
3980 
3981     A method called regular_clock_call() is invoked "regularly" (in
3982     sub ms intervals) throughout marking. It is this clock method that
3983     checks all the abort conditions which were mentioned above and
3984     decides when the task should abort. A work-based scheme is used to
3985     trigger this clock method: when the number of object words the
3986     marking phase has scanned or the number of references the marking
    phase has visited reaches a given limit. Additional invocations to
    the clock method have been planted in a few other strategic places
3989     too. The initial reason for the clock method was to avoid calling
3990     vtime too regularly, as it is quite expensive. So, once it was in
3991     place, it was natural to piggy-back all the other conditions on it
3992     too and not constantly check them throughout the code.
3993 
3994  *****************************************************************************/
3995 
3996 void CMTask::do_marking_step(double time_target_ms,
3997                              bool do_stealing,
3998                              bool do_termination) {
3999   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4000   assert(concurrent() == _cm->concurrent(), "they should be the same");
4001 
4002   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4003   assert(_task_queues != NULL, "invariant");
4004   assert(_task_queue != NULL, "invariant");
4005   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4006 
4007   assert(!_claimed,
4008          "only one thread should claim this task at any one time");
4009 
  // OK, this doesn't safeguard against all possible scenarios, as it is
4011   // possible for two threads to set the _claimed flag at the same
4012   // time. But it is only for debugging purposes anyway and it will
4013   // catch most problems.
4014   _claimed = true;
4015 
4016   _start_time_ms = os::elapsedVTime() * 1000.0;
4017   statsOnly( _interval_start_time_ms = _start_time_ms );
4018 
4019   double diff_prediction_ms =
4020     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4021   _time_target_ms = time_target_ms - diff_prediction_ms;
4022 
4023   // set up the variables that are used in the work-based scheme to
4024   // call the regular clock method
4025   _words_scanned = 0;
4026   _refs_reached  = 0;
4027   recalculate_limits();
4028 
4029   // clear all flags
4030   clear_has_aborted();
4031   _has_timed_out = false;
4032   _draining_satb_buffers = false;
4033 
4034   ++_calls;
4035 
4036   if (_cm->verbose_low()) {
4037     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4038                            "target = %1.2lfms >>>>>>>>>>",
4039                            _worker_id, _calls, _time_target_ms);
4040   }
4041 
4042   // Set up the bitmap and oop closures. Anything that uses them is
4043   // eventually called from this method, so it is OK to allocate these
4044   // statically.
4045   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4046   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
4047   set_cm_oop_closure(&cm_oop_closure);
4048 
4049   if (_cm->has_overflown()) {
4050     // This can happen if the mark stack overflows during a GC pause
4051     // and this task, after a yield point, restarts. We have to abort
4052     // as we need to get into the overflow protocol which happens
4053     // right at the end of this task.
4054     set_has_aborted();
4055   }
4056 
4057   // First drain any available SATB buffers. After this, we will not
4058   // look at SATB buffers before the next invocation of this method.
4059   // If enough completed SATB buffers are queued up, the regular clock
4060   // will abort this task so that it restarts.
4061   drain_satb_buffers();
4062   // ...then partially drain the local queue and the global stack
4063   drain_local_queue(true);
4064   drain_global_stack(true);
4065 
4066   do {
4067     if (!has_aborted() && _curr_region != NULL) {
4068       // This means that we're already holding on to a region.
4069       assert(_finger != NULL, "if region is not NULL, then the finger "
4070              "should not be NULL either");
4071 
4072       // We might have restarted this task after an evacuation pause
4073       // which might have evacuated the region we're holding on to
4074       // underneath our feet. Let's read its limit again to make sure
4075       // that we do not iterate over a region of the heap that
4076       // contains garbage (update_region_limit() will also move
4077       // _finger to the start of the region if it is found empty).
4078       update_region_limit();
4079       // We will start from _finger not from the start of the region,
4080       // as we might be restarting this task after aborting half-way
4081       // through scanning this region. In this case, _finger points to
4082       // the address where we last found a marked object. If this is a
4083       // fresh region, _finger points to start().
4084       MemRegion mr = MemRegion(_finger, _region_limit);
4085 
4086       if (_cm->verbose_low()) {
4087         gclog_or_tty->print_cr("[%u] we're scanning part "
4088                                "["PTR_FORMAT", "PTR_FORMAT") "
4089                                "of region "HR_FORMAT,
4090                                _worker_id, _finger, _region_limit,
4091                                HR_FORMAT_PARAMS(_curr_region));
4092       }
4093 
4094       assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
4095              "humongous regions should go around loop once only");
4096 
4097       // Some special cases:
4098       // If the memory region is empty, we can just give up the region.
4099       // If the current region is humongous then we only need to check
4100       // the bitmap for the bit associated with the start of the object,
4101       // scan the object if it's live, and give up the region.
4102       // Otherwise, let's iterate over the bitmap of the part of the region
4103       // that is left.
4104       // If the iteration is successful, give up the region.
4105       if (mr.is_empty()) {
4106         giveup_current_region();
4107         regular_clock_call();
4108       } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
4109         if (_nextMarkBitMap->isMarked(mr.start())) {
4110           // The object is marked - apply the closure
4111           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4112           bitmap_closure.do_bit(offset);
4113         }
4114         // Even if this task aborted while scanning the humongous object
4115         // we can (and should) give up the current region.
4116         giveup_current_region();
4117         regular_clock_call();
4118       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4119         giveup_current_region();
4120         regular_clock_call();
4121       } else {
4122         assert(has_aborted(), "currently the only way to do so");
4123         // The only way to abort the bitmap iteration is to return
4124         // false from the do_bit() method. However, inside the
4125         // do_bit() method we move the _finger to point to the
4126         // object currently being looked at. So, if we bail out, we
4127         // have definitely set _finger to something non-null.
4128         assert(_finger != NULL, "invariant");
4129 
4130         // Region iteration was actually aborted. So now _finger
4131         // points to the address of the object we last scanned. If we
4132         // leave it there, when we restart this task, we will rescan
4133         // the object. It is easy to avoid this. We move the finger by
4134         // enough to point to the next possible object header (the
4135         // bitmap knows by how much we need to move it as it knows its
4136         // granularity).
4137         assert(_finger < _region_limit, "invariant");
4138         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
4139         // Check if bitmap iteration was aborted while scanning the last object
4140         if (new_finger >= _region_limit) {
4141           giveup_current_region();
4142         } else {
4143           move_finger_to(new_finger);
4144         }
4145       }
4146     }
4147     // At this point we have either completed iterating over the
4148     // region we were holding on to, or we have aborted.
4149 
4150     // We then partially drain the local queue and the global stack.
4151     // (Do we really need this?)
4152     drain_local_queue(true);
4153     drain_global_stack(true);
4154 
4155     // See the note on the claim_region() method for why it might
4156     // return NULL even though more regions may be available for
4157     // claiming, and why we have to check out_of_regions() to
4158     // determine whether we're done.
4159     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4160       // We are going to try to claim a new region. We should have
4161       // given up on the previous one.
4162       // Separated the asserts so that we know which one fires.
4163       assert(_curr_region  == NULL, "invariant");
4164       assert(_finger       == NULL, "invariant");
4165       assert(_region_limit == NULL, "invariant");
4166       if (_cm->verbose_low()) {
4167         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4168       }
4169       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4170       if (claimed_region != NULL) {
4171         // Yes, we managed to claim one
4172         statsOnly( ++_regions_claimed );
4173 
4174         if (_cm->verbose_low()) {
4175           gclog_or_tty->print_cr("[%u] we successfully claimed "
4176                                  "region "PTR_FORMAT,
4177                                  _worker_id, claimed_region);
4178         }
4179 
4180         setup_for_region(claimed_region);
4181         assert(_curr_region == claimed_region, "invariant");
4182       }
4183       // It is important to call the regular clock here. It might take
4184       // a while to claim a region if, for example, we hit a large
4185       // block of empty regions. So we need to call the regular clock
4186       // method once round the loop to make sure it's called
4187       // frequently enough.
4188       regular_clock_call();
4189     }
4190 
4191     if (!has_aborted() && _curr_region == NULL) {
4192       assert(_cm->out_of_regions(),
4193              "at this point we should be out of regions");
4194     }
4195   } while ( _curr_region != NULL && !has_aborted());
4196 
4197   if (!has_aborted()) {
4198     // We cannot check whether the global stack is empty, since other
4199     // tasks might be pushing objects to it concurrently.
4200     assert(_cm->out_of_regions(),
4201            "at this point we should be out of regions");
4202 
4203     if (_cm->verbose_low()) {
4204       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4205     }
4206 
4207     // Try to reduce the number of available SATB buffers so that
4208     // remark has less work to do.
4209     drain_satb_buffers();
4210   }
4211 
4212   // Since we've done everything else, we can now totally drain the
4213   // local queue and global stack.
4214   drain_local_queue(false);
4215   drain_global_stack(false);
4216 
4217   // Attempt to steal work from other tasks' queues.
4218   if (do_stealing && !has_aborted()) {
4219     // We have not aborted. This means that we have finished all that
4220     // we could. Let's try to do some stealing...
4221 
4222     // We cannot check whether the global stack is empty, since other
4223     // tasks might be pushing objects to it concurrently.
4224     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4225            "only way to reach here");
4226 
4227     if (_cm->verbose_low()) {
4228       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4229     }
4230 
4231     while (!has_aborted()) {
4232       oop obj;
4233       statsOnly( ++_steal_attempts );
4234 
4235       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4236         if (_cm->verbose_medium()) {
4237           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4238                                  _worker_id, (void*) obj);
4239         }
4240 
4241         statsOnly( ++_steals );
4242 
4243         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4244                "any stolen object should be marked");
4245         scan_object(obj);
4246 
4247         // And since we're towards the end, let's totally drain the
4248         // local queue and global stack.
4249         drain_local_queue(false);
4250         drain_global_stack(false);
4251       } else {
4252         break;
4253       }
4254     }
4255   }
4256 
4257   // If we are about to wrap up and go into termination, check if we
4258   // should raise the overflow flag.
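       // (force_overflow() here is, presumably, a stress-testing hook that
       // lets us artificially exercise the overflow-and-restart path.)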
4259   if (do_termination && !has_aborted()) {
4260     if (_cm->force_overflow()->should_force()) {
4261       _cm->set_has_overflown();
4262       regular_clock_call();
4263     }
4264   }
4265 
4266   // We still haven't aborted. Now, let's try to get into the
4267   // termination protocol.
4268   if (do_termination && !has_aborted()) {
4269     // We cannot check whether the global stack is empty, since other
4270     // tasks might be concurrently pushing objects on it.
4271     // Separated the asserts so that we know which one fires.
4272     assert(_cm->out_of_regions(), "only way to reach here");
4273     assert(_task_queue->size() == 0, "only way to reach here");
4274 
4275     if (_cm->verbose_low()) {
4276       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4277     }
4278 
4279     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4280     // The CMTask class also extends the TerminatorTerminator class,
4281     // hence its should_exit_termination() method will also decide
4282     // whether to exit the termination protocol or not.
4283     bool finished = _cm->terminator()->offer_termination(this);
4284     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4285     _termination_time_ms +=
4286       termination_end_time_ms - _termination_start_time_ms;
4287 
4288     if (finished) {
4289       // We're all done.
4290 
4291       if (_worker_id == 0) {
4292         // let's allow task 0 to do this
4293         if (concurrent()) {
4294           assert(_cm->concurrent_marking_in_progress(), "invariant");
4295           // we need to set this to false before the next
4296           // safepoint. This way we ensure that the marking phase
4297           // doesn't observe any more heap expansions.
4298           _cm->clear_concurrent_marking_in_progress();
4299         }
4300       }
4301 
4302       // We can now guarantee that the global stack is empty, since
4303       // all other tasks have finished. We separated the guarantees so
4304       // that, if a condition is false, we can immediately find out
4305       // which one.
4306       guarantee(_cm->out_of_regions(), "only way to reach here");
4307       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4308       guarantee(_task_queue->size() == 0, "only way to reach here");
4309       guarantee(!_cm->has_overflown(), "only way to reach here");
4310       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4311 
4312       if (_cm->verbose_low()) {
4313         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4314       }
4315     } else {
4316       // Apparently there's more work to do. Let's abort this task. Our
4317       // caller will restart it and we can hopefully find more things to do.
4318 
4319       if (_cm->verbose_low()) {
4320         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4321                                _worker_id);
4322       }
4323 
4324       set_has_aborted();
4325       statsOnly( ++_aborted_termination );
4326     }
4327   }
4328 
4329   // Mainly for debugging purposes to make sure that a pointer to the
4330   // closure which was stack-allocated in this frame doesn't escape it
4331   // by accident.
4332   set_cm_oop_closure(NULL);
4333   double end_time_ms = os::elapsedVTime() * 1000.0;
4334   double elapsed_time_ms = end_time_ms - _start_time_ms;
4335   // Update the step history.
4336   _step_times_ms.add(elapsed_time_ms);
4337 
4338   if (has_aborted()) {
4339     // The task was aborted for some reason.
4340 
4341     statsOnly( ++_aborted );
4342 
4343     if (_has_timed_out) {
4344       double diff_ms = elapsed_time_ms - _time_target_ms;
4345       // Keep statistics of how well we did with respect to hitting
4346       // our target only if we actually timed out (if we aborted for
4347       // other reasons, then the results might get skewed).
4348       _marking_step_diffs_ms.add(diff_ms);
4349     }
4350 
4351     if (_cm->has_overflown()) {
4352       // This is the interesting one. We aborted because a global
4353       // overflow was raised. This means we have to restart the
4354       // marking phase and start iterating over regions. However, in
4355       // order to do this we have to make sure that all tasks stop
4356       // what they are doing and re-initialise in a safe manner. We
4357       // will achieve this with the use of two barrier sync points.
4358 
4359       if (_cm->verbose_low()) {
4360         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4361       }
4362 
4363       _cm->enter_first_sync_barrier(_worker_id);
4364       // When we exit this sync barrier we know that all tasks have
4365       // stopped doing marking work. So, it's now safe to
4366       // re-initialise our data structures. At the end of this method,
4367       // task 0 will clear the global data structures.
4368 
4369       statsOnly( ++_aborted_overflow );
4370 
4371       // We clear the local state of this task...
4372       clear_region_fields();
4373 
4374       // ...and enter the second barrier.
4375       _cm->enter_second_sync_barrier(_worker_id);
4376       // At this point everything has been re-initialised and we're
4377       // ready to restart.
4378     }
4379 
4380     if (_cm->verbose_low()) {
4381       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4382                              "elapsed = %1.2lfms <<<<<<<<<<",
4383                              _worker_id, _time_target_ms, elapsed_time_ms);
4384       if (_cm->has_aborted()) {
4385         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4386                                _worker_id);
4387       }
4388     }
4389   } else {
4390     if (_cm->verbose_low()) {
4391       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4392                              "elapsed = %1.2lfms <<<<<<<<<<",
4393                              _worker_id, _time_target_ms, elapsed_time_ms);
4394     }
4395   }
4396 
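       // We are done with this marking step, so drop our claim on the task.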
4397   _claimed = false;
4398 }
4399 
4400 CMTask::CMTask(uint worker_id,
4401                ConcurrentMark* cm,
4402                size_t* marked_bytes,
4403                BitMap* card_bm,
4404                CMTaskQueue* task_queue,
4405                CMTaskQueueSet* task_queues)
4406   : _g1h(G1CollectedHeap::heap()),
4407     _worker_id(worker_id), _cm(cm),
4408     _claimed(false),
4409     _nextMarkBitMap(NULL), _hash_seed(17),
4410     _task_queue(task_queue),
4411     _task_queues(task_queues),
4412     _cm_oop_closure(NULL),
4413     _marked_bytes_array(marked_bytes),
4414     _card_bm(card_bm) {
4415   guarantee(task_queue != NULL, "invariant");
4416   guarantee(task_queues != NULL, "invariant");
4417 
4418   statsOnly( _clock_due_to_scanning = 0;
4419              _clock_due_to_marking  = 0 );
4420 
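       // Seed the step-time statistics with a small non-zero diff, presumably
       // so that early predictions do not start from an empty sample set.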
4421   _marking_step_diffs_ms.add(0.5);
4422 }
4423 
4424 // These are formatting macros that are used below to ensure
4425 // consistent formatting. The *_H_* versions are used to format the
4426 // header for a particular value and they should be kept consistent
4427 // with the corresponding macro. Also note that most of the macros add
4428 // the necessary white space (as a prefix) which makes them a bit
4429 // easier to compose.
4430 
4431 // All the output lines are prefixed with this string to be able to
4432 // identify them easily in a large log file.
4433 #define G1PPRL_LINE_PREFIX            "###"
4434 
4435 #define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
4436 #ifdef _LP64
4437 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
4438 #else // _LP64
4439 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
4440 #endif // _LP64
4441 
4442 // For per-region info
4443 #define G1PPRL_TYPE_FORMAT            "   %-4s"
4444 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
4445 #define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
4446 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
4447 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
4448 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
4449 
4450 // For summary info
4451 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
4452 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
4453 #define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
4454 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
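     // For illustration only (values hypothetical), a per-region line built
     // from these macros has the shape:
     //   ###   OLD   0x...-0x...     123456     123456     123456        1234.5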
4455 
4456 G1PrintRegionLivenessInfoClosure::
4457 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4458   : _out(out),
4459     _total_used_bytes(0), _total_capacity_bytes(0),
4460     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4461     _hum_used_bytes(0), _hum_capacity_bytes(0),
4462     _hum_prev_live_bytes(0), _hum_next_live_bytes(0) {
4463   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4464   MemRegion g1_committed = g1h->g1_committed();
4465   MemRegion g1_reserved = g1h->g1_reserved();
4466   double now = os::elapsedTime();
4467 
4468   // Print the header of the output.
4469   _out->cr();
4470   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4471   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4472                  G1PPRL_SUM_ADDR_FORMAT("committed")
4473                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4474                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4475                  g1_committed.start(), g1_committed.end(),
4476                  g1_reserved.start(), g1_reserved.end(),
4477                  HeapRegion::GrainBytes);
4478   _out->print_cr(G1PPRL_LINE_PREFIX);
4479   _out->print_cr(G1PPRL_LINE_PREFIX
4480                  G1PPRL_TYPE_H_FORMAT
4481                  G1PPRL_ADDR_BASE_H_FORMAT
4482                  G1PPRL_BYTE_H_FORMAT
4483                  G1PPRL_BYTE_H_FORMAT
4484                  G1PPRL_BYTE_H_FORMAT
4485                  G1PPRL_DOUBLE_H_FORMAT,
4486                  "type", "address-range",
4487                  "used", "prev-live", "next-live", "gc-eff");
4488   _out->print_cr(G1PPRL_LINE_PREFIX
4489                  G1PPRL_TYPE_H_FORMAT
4490                  G1PPRL_ADDR_BASE_H_FORMAT
4491                  G1PPRL_BYTE_H_FORMAT
4492                  G1PPRL_BYTE_H_FORMAT
4493                  G1PPRL_BYTE_H_FORMAT
4494                  G1PPRL_DOUBLE_H_FORMAT,
4495                  "", "",
4496                  "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
4497 }
4498 
4499 // It takes as a parameter a pointer to one of the _hum_* fields, deduces
4500 // the corresponding value for a region in a humongous region series
4501 // (either the region size, or what's left if the _hum_* field is < the
4502 // region size), and updates the _hum_* field accordingly.
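     // For example (hypothetical values): with HeapRegion::GrainBytes == 1M
     // and a humongous series whose used size is 2.5M, three successive calls
     // on &_hum_used_bytes return 1M, 1M and 0.5M, leaving the field at 0.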
4503 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4504   size_t bytes = 0;
4505   // The > 0 check is to deal with the prev and next live bytes which
4506   // could be 0.
4507   if (*hum_bytes > 0) {
4508     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4509     *hum_bytes -= bytes;
4510   }
4511   return bytes;
4512 }
4513 
4514 // It deduces the values for a region in a humongous region series
4515 // from the _hum_* fields and updates those accordingly. It assumes
4516 // that the _hum_* fields have already been set up from the "starts
4517 // humongous" region and that we visit the regions in address order.
4518 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4519                                                      size_t* capacity_bytes,
4520                                                      size_t* prev_live_bytes,
4521                                                      size_t* next_live_bytes) {
4522   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4523   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
4524   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
4525   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4526   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4527 }
4528 
4529 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4530   const char* type = "";
4531   HeapWord* bottom       = r->bottom();
4532   HeapWord* end          = r->end();
4533   size_t capacity_bytes  = r->capacity();
4534   size_t used_bytes      = r->used();
4535   size_t prev_live_bytes = r->live_bytes();
4536   size_t next_live_bytes = r->next_live_bytes();
4537   double gc_eff          = r->gc_efficiency();
4538   if (r->used() == 0) {
4539     type = "FREE";
4540   } else if (r->is_survivor()) {
4541     type = "SURV";
4542   } else if (r->is_young()) {
4543     type = "EDEN";
4544   } else if (r->startsHumongous()) {
4545     type = "HUMS";
4546 
4547     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4548            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4549            "they should have been zeroed after the last time we used them");
4550     // Set up the _hum_* fields.
4551     _hum_capacity_bytes  = capacity_bytes;
4552     _hum_used_bytes      = used_bytes;
4553     _hum_prev_live_bytes = prev_live_bytes;
4554     _hum_next_live_bytes = next_live_bytes;
4555     get_hum_bytes(&used_bytes, &capacity_bytes,
4556                   &prev_live_bytes, &next_live_bytes);
4557     end = bottom + HeapRegion::GrainWords;
4558   } else if (r->continuesHumongous()) {
4559     type = "HUMC";
4560     get_hum_bytes(&used_bytes, &capacity_bytes,
4561                   &prev_live_bytes, &next_live_bytes);
4562     assert(end == bottom + HeapRegion::GrainWords, "invariant");
4563   } else {
4564     type = "OLD";
4565   }
4566 
4567   _total_used_bytes      += used_bytes;
4568   _total_capacity_bytes  += capacity_bytes;
4569   _total_prev_live_bytes += prev_live_bytes;
4570   _total_next_live_bytes += next_live_bytes;
4571 
4572   // Print a line for this particular region.
4573   _out->print_cr(G1PPRL_LINE_PREFIX
4574                  G1PPRL_TYPE_FORMAT
4575                  G1PPRL_ADDR_BASE_FORMAT
4576                  G1PPRL_BYTE_FORMAT
4577                  G1PPRL_BYTE_FORMAT
4578                  G1PPRL_BYTE_FORMAT
4579                  G1PPRL_DOUBLE_FORMAT,
4580                  type, bottom, end,
4581                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff);
4582 
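       // Returning false means "do not terminate the iteration", so every
       // region in the heap gets its own line in the output.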
4583   return false;
4584 }
4585 
4586 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4587   // Print the footer of the output.
4588   _out->print_cr(G1PPRL_LINE_PREFIX);
4589   _out->print_cr(G1PPRL_LINE_PREFIX
4590                  " SUMMARY"
4591                  G1PPRL_SUM_MB_FORMAT("capacity")
4592                  G1PPRL_SUM_MB_PERC_FORMAT("used")
4593                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4594                  G1PPRL_SUM_MB_PERC_FORMAT("next-live"),
4595                  bytes_to_mb(_total_capacity_bytes),
4596                  bytes_to_mb(_total_used_bytes),
4597                  perc(_total_used_bytes, _total_capacity_bytes),
4598                  bytes_to_mb(_total_prev_live_bytes),
4599                  perc(_total_prev_live_bytes, _total_capacity_bytes),
4600                  bytes_to_mb(_total_next_live_bytes),
4601                  perc(_total_next_live_bytes, _total_capacity_bytes));
4602   _out->cr();
4603 }