/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
                                               HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                 HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}
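
// Illustrative sketch: a typical walk over the marked objects in
// [start, end) using the iterator above, mirroring the pattern used by
// CalcLiveObjectsClosure further down:
//
//   HeapWord* cur = bm->getNextMarkedWordAddress(start, end);
//   while (cur < end) {
//     oop obj = oop(cur);
//     // ... process obj ...
//     cur = bm->getNextMarkedWordAddress(cur + obj->size(), end);
//   }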

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
}
#endif

bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
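  // Sizing note: the bit map needs one bit per (HeapWordSize << _shifter)
  // bytes of heap, i.e. per possible object start, so the backing store
  // below is sized as (_bmWordSize >> _shifter) bits divided by
  // BitsPerByte; the "+ 1" conservatively covers a partial trailing byte.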
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((uintptr_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}

void CMBitMap::clearAll() {
  _bm.clear();
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

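// Returns the first contiguous run of marked words in [addr, end_addr)
// as a MemRegion, clearing the corresponding bits as a side effect; the
// result is empty if no marked words remain in the range.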
MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflowed the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

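// Lock-free push: claim a slot by advancing _index with a CAS and only
// then store the oop into the claimed slot; a failed CAS means another
// thread claimed that slot first, so we retry. If the stack is full we
// just record the overflow; the caller relies on the overflow flag to
// trigger a restart of marking.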
void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically.  We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

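// Lock-free bulk variant of par_push(): claims a block of n slots with
// a single CAS on _index and then fills them in. Sets _overflow (and
// pushes nothing) if the block would not fit.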
void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int  ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

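// Bulk push that serializes on ParGCRareEvent_lock rather than using
// the CAS protocol above; the lock's name suggests this path is only
// taken on infrequent events where contention is not a concern.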
void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

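// Pops up to "max" entries into ptr_arr under the same lock, storing
// the number actually popped in *n. Returns false iff the stack was
// empty.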
bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint  new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

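// Drains the stack, applying "cl" to each popped object, which must be
// grey (i.e. marked in "bm" whenever bm is non-NULL). If "yield_after"
// is true we offer to yield after each object and return false if we
// bailed out early; otherwise we return true once the stack is empty.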
template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue, so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

bool ConcurrentMark::not_yet_marked(oop obj) const {
  return _g1h->is_obj_ill(obj);
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false),  _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

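// Heuristic: use roughly a quarter of the parallel GC threads for
// concurrent marking ((n + 2) / 4 rounds n / 4 to the nearest integer),
// but always at least one.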
uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  _g1h(g1h),
  _markBitMap1(MinObjAlignment - 1),
  _markBitMap2(MinObjAlignment - 1),

  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = "PTR_FORMAT, _heap_start, _heap_end);
  }

  if (!_markBitMap1.allocate(heap_rs)) {
    warning("Failed to allocate first CM bit map");
    return;
  }
  if (!_markBitMap2.allocate(heap_rs)) {
    warning("Failed to allocate second CM bit map");
    return;
  }

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
            "than ParallelGCThreads (" UINT32_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads =       0;
    _max_parallel_marking_threads =   0;
    _sleep_factor             =     0.0;
    _marking_task_overhead    =     1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
                                                (double) os::processor_count();
      double sleep_factor =
                         (1.0 - marking_task_overhead) / marking_task_overhead;

      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor             = sleep_factor;
      _marking_task_overhead    = marking_task_overhead;
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
                     (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) heap_rs.base();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.
    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions
  // at the end of evacuation pauses, when tasks are inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end   = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // We need this to make sure that the flag is on during the evac
  // pause that has the initial mark piggy-backed on it
  set_concurrent_marking_in_progress();
}

void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

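// Sizes the terminator and the two overflow barrier syncs for the
// number of tasks that will actually run in the upcoming phase, and
// propagates the concurrent flag to every task, active or not.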
void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(_finger == _heap_end, "only way to get here");
    update_g1_committed(true);
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // clear the mark bitmap (no grey objects to start with).
  // We need to do this in chunks and offer to yield in between
  // each chunk.
  HeapWord* start  = _nextMarkBitMap->startWord();
  HeapWord* end    = _nextMarkBitMap->endWord();
  HeapWord* cur    = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur, next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Clear the liveness counting data
  clear_all_count_data();

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialise marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}

void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // forced an overflow during remark we'd never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow count will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _first_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
  }

  // let the task associated with worker 0 do this
  if (worker_id == 0) {
    // task 0 is responsible for clearing the global data structures
    // We should be here because of an overflow. During STW we should
    // not clear the overflow flag since we rely on it being true when
    // we exit this method to abort the pause and restart concurrent
    // marking.
    reset_marking_state(concurrent() /* clear_overflow */);
    force_overflow()->update();

    if (G1Log::fine()) {
      gclog_or_tty->date_stamp(PrintGCDateStamps);
      gclog_or_tty->stamp(PrintGCTimeStamps);
      gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _second_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everything should be re-initialised and ready to go

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
  }
}

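// ForceOverflowSettings is a debug-only (non-product) aid: it arranges
// for G1ConcMarkForceOverflow artificial mark stack overflows so that
// the overflow-and-restart machinery above can be exercised in testing.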
#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    ConcurrentGCThread::stsJoin();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true /* do_stealing    */,
                                  true /* do_termination */);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          ConcurrentGCThread::stsLeave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          ConcurrentGCThread::stsJoin();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
          gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                                 "overhead %1.4lf",
                                 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                                 the_task->conc_overhead(os::elapsedTime()) * 8.0);
          gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                                 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    ConcurrentGCThread::stsLeave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_phase()"
  set_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_strong_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    gclog_or_tty->print(" VerifyDuringGC:(before)");
    Universe::heap()->prepare_for_verify();
    Universe::verify(/* silent */ false,
                     /* option */ VerifyOption_G1UsePrevMarking);
  }

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops.  We overflowed.  Restart concurrent marking.
    _restart_for_overflow = true;
    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      gclog_or_tty->print(" VerifyDuringGC:(after)");
      Universe::heap()->prepare_for_verify();
      Universe::verify(/* silent */ false,
                       /* option */ VerifyOption_G1UseNextMarking);
    }
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

#if VERIFY_OBJS_PROCESSED
  _scan_obj_cl.objs_processed = 0;
  ThreadLocalObjQueue::objs_enqueued = 0;
#endif

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
1343   void set_bit_for_region(HeapRegion* hr) {
1344     assert(!hr->continuesHumongous(), "should have filtered those out");
1345 
1346     BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1347     if (!hr->startsHumongous()) {
1348       // Normal (non-humongous) case: just set the bit.
1349       _region_bm->par_at_put(index, true);
1350     } else {
1351       // Starts humongous case: calculate how many regions are part of
1352       // this humongous region and then set the bit range.
1353       BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
1354       _region_bm->par_at_put_range(index, end_index, true);
1355     }
1356   }
1357 
1358 public:
1359   CMCountDataClosureBase(G1CollectedHeap* g1h,
1360                          BitMap* region_bm, BitMap* card_bm):
1361     _g1h(g1h), _cm(g1h->concurrent_mark()),
1362     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
1363     _region_bm(region_bm), _card_bm(card_bm) { }
1364 };
1365 
1366 // Closure that calculates the # live objects per region. Used
1367 // for verification purposes during the cleanup pause.
1368 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1369   CMBitMapRO* _bm;
1370   size_t _region_marked_bytes;
1371 
1372 public:
1373   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1374                          BitMap* region_bm, BitMap* card_bm) :
1375     CMCountDataClosureBase(g1h, region_bm, card_bm),
1376     _bm(bm), _region_marked_bytes(0) { }
1377 
1378   bool doHeapRegion(HeapRegion* hr) {
1379 
1380     if (hr->continuesHumongous()) {
1381       // We will ignore these here and process them when their
1382       // associated "starts humongous" region is processed (see
1383       // set_bit_for_heap_region()). Note that we cannot rely on their
1384       // associated "starts humongous" region to have their bit set to
1385       // 1 since, due to the region chunking in the parallel region
1386       // iteration, a "continues humongous" region might be visited
1387       // before its associated "starts humongous".
1388       return false;
1389     }
1390 
1391     HeapWord* ntams = hr->next_top_at_mark_start();
1392     HeapWord* start = hr->bottom();
1393 
1394     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1395            err_msg("Preconditions not met - "
1396                    "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1397                    start, ntams, hr->end()));
1398 
1399     // Find the first marked object at or after "start".
1400     start = _bm->getNextMarkedWordAddress(start, ntams);
1401 
1402     size_t marked_bytes = 0;
1403 
1404     while (start < ntams) {
1405       oop obj = oop(start);
1406       int obj_sz = obj->size();
1407       HeapWord* obj_end = start + obj_sz;
1408 
1409       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1410       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1411 
1412       // Note: if we're looking at the last region in heap - obj_end
1413       // could be actually just beyond the end of the heap; end_idx
1414       // will then correspond to a (non-existent) card that is also
1415       // just beyond the heap.
1416       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1417         // end of object is not card aligned - increment to cover
1418         // all the cards spanned by the object
1419         end_idx += 1;
1420       }
1421 
1422       // Set the bits in the card BM for the cards spanned by this object.
1423       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1424 
1425       // Add the size of this object to the number of marked bytes.
1426       marked_bytes += (size_t)obj_sz * HeapWordSize;
1427 
1428       // Find the next marked object after this one.
1429       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1430     }
1431 
1432     // Mark the allocated-since-marking portion...
1433     HeapWord* top = hr->top();
1434     if (ntams < top) {
1435       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1436       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1437 
      // Note: if we're looking at the last region in the heap, top
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
1442       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // top is not card aligned - increment to cover all the
        // cards spanned by the allocated-since-marking portion
1445         end_idx += 1;
1446       }
1447       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1448 
1449       // This definitely means the region has live objects.
1450       set_bit_for_region(hr);
1451     }
1452 
1453     // Update the live region bitmap.
1454     if (marked_bytes > 0) {
1455       set_bit_for_region(hr);
1456     }
1457 
    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine.
1460     _region_marked_bytes = marked_bytes;
1461 
1462     return false;
1463   }
1464 
1465   size_t region_marked_bytes() const { return _region_marked_bytes; }
1466 };
1467 
1468 // Heap region closure used for verifying the counting data
1469 // that was accumulated concurrently and aggregated during
1470 // the remark pause. This closure is applied to the heap
1471 // regions during the STW cleanup pause.
1472 
1473 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1474   G1CollectedHeap* _g1h;
1475   ConcurrentMark* _cm;
1476   CalcLiveObjectsClosure _calc_cl;
1477   BitMap* _region_bm;   // Region BM to be verified
1478   BitMap* _card_bm;     // Card BM to be verified
1479   bool _verbose;        // verbose output?
1480 
1481   BitMap* _exp_region_bm; // Expected Region BM values
1482   BitMap* _exp_card_bm;   // Expected card BM values
1483 
1484   int _failures;
1485 
1486 public:
1487   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1488                                 BitMap* region_bm,
1489                                 BitMap* card_bm,
1490                                 BitMap* exp_region_bm,
1491                                 BitMap* exp_card_bm,
1492                                 bool verbose) :
1493     _g1h(g1h), _cm(g1h->concurrent_mark()),
1494     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1495     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1496     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1497     _failures(0) { }
1498 
1499   int failures() const { return _failures; }
1500 
1501   bool doHeapRegion(HeapRegion* hr) {
1502     if (hr->continuesHumongous()) {
1503       // We will ignore these here and process them when their
1504       // associated "starts humongous" region is processed (see
      // set_bit_for_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have its bit set to
1507       // 1 since, due to the region chunking in the parallel region
1508       // iteration, a "continues humongous" region might be visited
1509       // before its associated "starts humongous".
1510       return false;
1511     }
1512 
1513     int failures = 0;
1514 
1515     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1516     // this region and set the corresponding bits in the expected region
1517     // and card bitmaps.
1518     bool res = _calc_cl.doHeapRegion(hr);
1519     assert(res == false, "should be continuing");
1520 
1521     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1522                     Mutex::_no_safepoint_check_flag);
1523 
1524     // Verify the marked bytes for this region.
1525     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1526     size_t act_marked_bytes = hr->next_marked_bytes();
1527 
    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting for some objects during the actual marking.
1530     if (exp_marked_bytes > act_marked_bytes) {
1531       if (_verbose) {
1532         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1533                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1534                                hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
1535       }
1536       failures += 1;
1537     }
1538 
1539     // Verify the bit, for this region, in the actual and expected
1540     // (which was just calculated) region bit maps.
1541     // We're not OK if the bit in the calculated expected region
1542     // bitmap is set and the bit in the actual region bitmap is not.
1543     BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1544 
1545     bool expected = _exp_region_bm->at(index);
1546     bool actual = _region_bm->at(index);
1547     if (expected && !actual) {
1548       if (_verbose) {
1549         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1550                                "expected: %s, actual: %s",
1551                                hr->hrs_index(),
1552                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1553       }
1554       failures += 1;
1555     }
1556 
1557     // Verify that the card bit maps for the cards spanned by the current
1558     // region match. We have an error if we have a set bit in the expected
1559     // bit map and the corresponding bit in the actual bitmap is not set.
1560 
1561     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1562     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1563 
1564     for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
1565       expected = _exp_card_bm->at(i);
1566       actual = _card_bm->at(i);
1567 
1568       if (expected && !actual) {
1569         if (_verbose) {
1570           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1571                                  "expected: %s, actual: %s",
1572                                  hr->hrs_index(), i,
1573                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1574         }
1575         failures += 1;
1576       }
1577     }
1578 
1579     if (failures > 0 && _verbose)  {
1580       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1581                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1582                              HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
1583                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1584     }
1585 
1586     _failures += failures;
1587 
1588     // We could stop iteration over the heap when we
1589     // find the first violating region by returning true.
1590     return false;
1591   }
1592 };
1593 
1594 
1595 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1596 protected:
1597   G1CollectedHeap* _g1h;
1598   ConcurrentMark* _cm;
1599   BitMap* _actual_region_bm;
1600   BitMap* _actual_card_bm;
1601 
1602   uint    _n_workers;
1603 
1604   BitMap* _expected_region_bm;
1605   BitMap* _expected_card_bm;
1606 
1607   int  _failures;
1608   bool _verbose;
1609 
1610 public:
1611   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1612                             BitMap* region_bm, BitMap* card_bm,
1613                             BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _n_workers(0),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0), _verbose(false) {
1620     assert(VerifyDuringGC, "don't call this otherwise");
1621 
1622     // Use the value already set as the number of active threads
1623     // in the call to run_task().
1624     if (G1CollectedHeap::use_parallel_gc_threads()) {
1625       assert( _g1h->workers()->active_workers() > 0,
1626         "Should have been previously set");
1627       _n_workers = _g1h->workers()->active_workers();
1628     } else {
1629       _n_workers = 1;
1630     }
1631 
1632     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1633     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1634 
1635     _verbose = _cm->verbose_medium();
1636   }
1637 
1638   void work(uint worker_id) {
1639     assert(worker_id < _n_workers, "invariant");
1640 
1641     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1642                                             _actual_region_bm, _actual_card_bm,
1643                                             _expected_region_bm,
1644                                             _expected_card_bm,
1645                                             _verbose);
1646 
1647     if (G1CollectedHeap::use_parallel_gc_threads()) {
1648       _g1h->heap_region_par_iterate_chunked(&verify_cl,
1649                                             worker_id,
1650                                             _n_workers,
1651                                             HeapRegion::VerifyCountClaimValue);
1652     } else {
1653       _g1h->heap_region_iterate(&verify_cl);
1654     }
1655 
1656     Atomic::add(verify_cl.failures(), &_failures);
1657   }
1658 
1659   int failures() const { return _failures; }
1660 };
1661 
1662 // Closure that finalizes the liveness counting data.
1663 // Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top)
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets the bit in the region
// liveness bitmap for each region containing live data.
1668 
1669 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1670  public:
1671   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1672                               BitMap* region_bm,
1673                               BitMap* card_bm) :
1674     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1675 
1676   bool doHeapRegion(HeapRegion* hr) {
1677 
1678     if (hr->continuesHumongous()) {
1679       // We will ignore these here and process them when their
1680       // associated "starts humongous" region is processed (see
      // set_bit_for_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have its bit set to
1683       // 1 since, due to the region chunking in the parallel region
1684       // iteration, a "continues humongous" region might be visited
1685       // before its associated "starts humongous".
1686       return false;
1687     }
1688 
1689     HeapWord* ntams = hr->next_top_at_mark_start();
1690     HeapWord* top   = hr->top();
1691 
1692     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1693 
1694     // Mark the allocated-since-marking portion...
1695     if (ntams < top) {
1696       // This definitely means the region has live objects.
1697       set_bit_for_region(hr);
1698 
1699       // Now set the bits in the card bitmap for [ntams, top)
1700       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1701       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1702 
      // Note: if we're looking at the last region in the heap, top
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
1707       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // top is not card aligned - increment to cover all the
        // cards spanned by the allocated-since-marking portion
1710         end_idx += 1;
1711       }
1712 
1713       assert(end_idx <= _card_bm->size(),
1714              err_msg("oob: end_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1715                      end_idx, _card_bm->size()));
1716       assert(start_idx < _card_bm->size(),
1717              err_msg("oob: start_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1718                      start_idx, _card_bm->size()));
1719 
1720       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1721     }
1722 
1723     // Set the bit for the region if it contains live data
1724     if (hr->next_marked_bytes() > 0) {
1725       set_bit_for_region(hr);
1726     }
1727 
1728     return false;
1729   }
1730 };
1731 
1732 class G1ParFinalCountTask: public AbstractGangTask {
1733 protected:
1734   G1CollectedHeap* _g1h;
1735   ConcurrentMark* _cm;
1736   BitMap* _actual_region_bm;
1737   BitMap* _actual_card_bm;
1738 
1739   uint    _n_workers;
1740 
1741 public:
1742   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1743     : AbstractGangTask("G1 final counting"),
1744       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1745       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1746       _n_workers(0) {
1747     // Use the value already set as the number of active threads
1748     // in the call to run_task().
1749     if (G1CollectedHeap::use_parallel_gc_threads()) {
1750       assert( _g1h->workers()->active_workers() > 0,
1751         "Should have been previously set");
1752       _n_workers = _g1h->workers()->active_workers();
1753     } else {
1754       _n_workers = 1;
1755     }
1756   }
1757 
1758   void work(uint worker_id) {
1759     assert(worker_id < _n_workers, "invariant");
1760 
1761     FinalCountDataUpdateClosure final_update_cl(_g1h,
1762                                                 _actual_region_bm,
1763                                                 _actual_card_bm);
1764 
1765     if (G1CollectedHeap::use_parallel_gc_threads()) {
1766       _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1767                                             worker_id,
1768                                             _n_workers,
1769                                             HeapRegion::FinalCountClaimValue);
1770     } else {
1771       _g1h->heap_region_iterate(&final_update_cl);
1772     }
1773   }
1774 };
1775 
1776 class G1ParNoteEndTask;
1777 
1778 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1779   G1CollectedHeap* _g1;
1780   int _worker_num;
1781   size_t _max_live_bytes;
1782   uint _regions_claimed;
1783   size_t _freed_bytes;
1784   FreeRegionList* _local_cleanup_list;
1785   OldRegionSet* _old_proxy_set;
1786   HumongousRegionSet* _humongous_proxy_set;
1787   HRRSCleanupTask* _hrrs_cleanup_task;
1788   double _claimed_region_time;
1789   double _max_region_time;
1790 
1791 public:
1792   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1793                              int worker_num,
1794                              FreeRegionList* local_cleanup_list,
1795                              OldRegionSet* old_proxy_set,
1796                              HumongousRegionSet* humongous_proxy_set,
1797                              HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1), _worker_num(worker_num),
    _max_live_bytes(0), _regions_claimed(0),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_proxy_set(old_proxy_set),
    _humongous_proxy_set(humongous_proxy_set),
    _hrrs_cleanup_task(hrrs_cleanup_task),
    _claimed_region_time(0.0), _max_region_time(0.0) { }
1806 
1807   size_t freed_bytes() { return _freed_bytes; }
1808 
1809   bool doHeapRegion(HeapRegion *hr) {
1810     if (hr->continuesHumongous()) {
1811       return false;
1812     }
1813     // We use a claim value of zero here because all regions
1814     // were claimed with value 1 in the FinalCount task.
1815     _g1->reset_gc_time_stamps(hr);
1816     double start = os::elapsedTime();
1817     _regions_claimed++;
1818     hr->note_end_of_marking();
1819     _max_live_bytes += hr->max_live_bytes();
1820     _g1->free_region_if_empty(hr,
1821                               &_freed_bytes,
1822                               _local_cleanup_list,
1823                               _old_proxy_set,
1824                               _humongous_proxy_set,
1825                               _hrrs_cleanup_task,
1826                               true /* par */);
1827     double region_time = (os::elapsedTime() - start);
1828     _claimed_region_time += region_time;
1829     if (region_time > _max_region_time) {
1830       _max_region_time = region_time;
1831     }
1832     return false;
1833   }
1834 
1835   size_t max_live_bytes() { return _max_live_bytes; }
1836   uint regions_claimed() { return _regions_claimed; }
1837   double claimed_region_time_sec() { return _claimed_region_time; }
1838   double max_region_time_sec() { return _max_region_time; }
1839 };
1840 
1841 class G1ParNoteEndTask: public AbstractGangTask {
1842   friend class G1NoteEndOfConcMarkClosure;
1843 
1844 protected:
1845   G1CollectedHeap* _g1h;
1846   size_t _max_live_bytes;
1847   size_t _freed_bytes;
1848   FreeRegionList* _cleanup_list;
1849 
1850 public:
1851   G1ParNoteEndTask(G1CollectedHeap* g1h,
1852                    FreeRegionList* cleanup_list) :
1853     AbstractGangTask("G1 note end"), _g1h(g1h),
1854     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1855 
1856   void work(uint worker_id) {
1857     double start = os::elapsedTime();
1858     FreeRegionList local_cleanup_list("Local Cleanup List");
1859     OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
1860     HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
1861     HRRSCleanupTask hrrs_cleanup_task;
1862     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
1863                                            &old_proxy_set,
1864                                            &humongous_proxy_set,
1865                                            &hrrs_cleanup_task);
1866     if (G1CollectedHeap::use_parallel_gc_threads()) {
1867       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1868                                             _g1h->workers()->active_workers(),
1869                                             HeapRegion::NoteEndClaimValue);
1870     } else {
1871       _g1h->heap_region_iterate(&g1_note_end);
1872     }
1873     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1874 
1875     // Now update the lists
1876     _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
1877                                             NULL /* free_list */,
1878                                             &old_proxy_set,
1879                                             &humongous_proxy_set,
1880                                             true /* par */);
1881     {
1882       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1883       _max_live_bytes += g1_note_end.max_live_bytes();
1884       _freed_bytes += g1_note_end.freed_bytes();
1885 
      // If we were to iterate over the global cleanup list at the end
      // of cleanup to do this printing, we could not guarantee that
      // only the newly-reclaimed regions would generate output (the
      // list might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.
1893 
1894       G1HRPrinter* hr_printer = _g1h->hr_printer();
1895       if (hr_printer->is_active()) {
1896         HeapRegionLinkedListIterator iter(&local_cleanup_list);
1897         while (iter.more_available()) {
1898           HeapRegion* hr = iter.get_next();
1899           hr_printer->cleanup(hr);
1900         }
1901       }
1902 
1903       _cleanup_list->add_as_tail(&local_cleanup_list);
1904       assert(local_cleanup_list.is_empty(), "post-condition");
1905 
1906       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1907     }
1908   }
1909   size_t max_live_bytes() { return _max_live_bytes; }
1910   size_t freed_bytes() { return _freed_bytes; }
1911 };
1912 
1913 class G1ParScrubRemSetTask: public AbstractGangTask {
1914 protected:
1915   G1RemSet* _g1rs;
1916   BitMap* _region_bm;
1917   BitMap* _card_bm;
1918 public:
1919   G1ParScrubRemSetTask(G1CollectedHeap* g1h,
1920                        BitMap* region_bm, BitMap* card_bm) :
1921     AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
1922     _region_bm(region_bm), _card_bm(card_bm) { }
1923 
1924   void work(uint worker_id) {
1925     if (G1CollectedHeap::use_parallel_gc_threads()) {
1926       _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
1927                        HeapRegion::ScrubRemSetClaimValue);
1928     } else {
1929       _g1rs->scrub(_region_bm, _card_bm);
1930     }
1931   }
1932 
1933 };
1934 
1935 void ConcurrentMark::cleanup() {
1936   // world is stopped at this checkpoint
1937   assert(SafepointSynchronize::is_at_safepoint(),
1938          "world should be stopped");
1939   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1940 
1941   // If a full collection has happened, we shouldn't do this.
1942   if (has_aborted()) {
1943     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1944     return;
1945   }
1946 
1947   HRSPhaseSetter x(HRSPhaseCleanup);
1948   g1h->verify_region_sets_optional();
1949 
1950   if (VerifyDuringGC) {
1951     HandleMark hm;  // handle scope
1952     gclog_or_tty->print(" VerifyDuringGC:(before)");
1953     Universe::heap()->prepare_for_verify();
1954     Universe::verify(/* silent */ false,
1955                      /* option */ VerifyOption_G1UsePrevMarking);
1956   }
1957 
1958   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1959   g1p->record_concurrent_mark_cleanup_start();
1960 
1961   double start = os::elapsedTime();
1962 
1963   HeapRegionRemSet::reset_for_cleanup_tasks();
1964 
1965   uint n_workers;
1966 
1967   // Do counting once more with the world stopped for good measure.
1968   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1969 
1970   if (G1CollectedHeap::use_parallel_gc_threads()) {
    assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
1973 
1974     g1h->set_par_threads();
1975     n_workers = g1h->n_par_threads();
1976     assert(g1h->n_par_threads() == n_workers,
1977            "Should not have been reset");
1978     g1h->workers()->run_task(&g1_par_count_task);
1979     // Done with the parallel phase so reset to 0.
1980     g1h->set_par_threads(0);
1981 
1982     assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
1983            "sanity check");
1984   } else {
1985     n_workers = 1;
1986     g1_par_count_task.work(0);
1987   }
1988 
1989   if (VerifyDuringGC) {
1990     // Verify that the counting data accumulated during marking matches
1991     // that calculated by walking the marking bitmap.
1992 
1993     // Bitmaps to hold expected values
1994     BitMap expected_region_bm(_region_bm.size(), false);
1995     BitMap expected_card_bm(_card_bm.size(), false);
1996 
1997     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1998                                                  &_region_bm,
1999                                                  &_card_bm,
2000                                                  &expected_region_bm,
2001                                                  &expected_card_bm);
2002 
2003     if (G1CollectedHeap::use_parallel_gc_threads()) {
2004       g1h->set_par_threads((int)n_workers);
2005       g1h->workers()->run_task(&g1_par_verify_task);
2006       // Done with the parallel phase so reset to 0.
2007       g1h->set_par_threads(0);
2008 
2009       assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2010              "sanity check");
2011     } else {
2012       g1_par_verify_task.work(0);
2013     }
2014 
2015     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2016   }
2017 
2018   size_t start_used_bytes = g1h->used();
2019   g1h->set_marking_complete();
2020 
2021   double count_end = os::elapsedTime();
2022   double this_final_counting_time = (count_end - start);
2023   _total_counting_time += this_final_counting_time;
2024 
2025   if (G1PrintRegionLivenessInfo) {
2026     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2027     _g1h->heap_region_iterate(&cl);
2028   }
2029 
  // Install newly created mark bitmap as "prev".
2031   swapMarkBitMaps();
2032 
2033   g1h->reset_gc_time_stamp();
2034 
2035   // Note end of marking in all heap regions.
2036   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2037   if (G1CollectedHeap::use_parallel_gc_threads()) {
2038     g1h->set_par_threads((int)n_workers);
2039     g1h->workers()->run_task(&g1_par_note_end_task);
2040     g1h->set_par_threads(0);
2041 
2042     assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2043            "sanity check");
2044   } else {
2045     g1_par_note_end_task.work(0);
2046   }
2047   g1h->check_gc_time_stamps();
2048 
2049   if (!cleanup_list_is_empty()) {
2050     // The cleanup list is not empty, so we'll have to process it
2051     // concurrently. Notify anyone else that might be wanting free
2052     // regions that there will be more free regions coming soon.
2053     g1h->set_free_regions_coming();
2054   }
2055 
  // The remembered sets must be scrubbed before the
  // record_concurrent_mark_cleanup_end() call below, since scrubbing
  // affects the metric by which we sort the heap regions.
2058   if (G1ScrubRemSets) {
2059     double rs_scrub_start = os::elapsedTime();
2060     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2061     if (G1CollectedHeap::use_parallel_gc_threads()) {
2062       g1h->set_par_threads((int)n_workers);
2063       g1h->workers()->run_task(&g1_par_scrub_rs_task);
2064       g1h->set_par_threads(0);
2065 
2066       assert(g1h->check_heap_region_claim_values(
2067                                             HeapRegion::ScrubRemSetClaimValue),
2068              "sanity check");
2069     } else {
2070       g1_par_scrub_rs_task.work(0);
2071     }
2072 
2073     double rs_scrub_end = os::elapsedTime();
2074     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2075     _total_rs_scrub_time += this_rs_scrub_time;
2076   }
2077 
2078   // this will also free any regions totally full of garbage objects,
2079   // and sort the regions.
2080   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2081 
2082   // Statistics.
2083   double end = os::elapsedTime();
2084   _cleanup_times.add((end - start) * 1000.0);
2085 
2086   if (G1Log::fine()) {
2087     g1h->print_size_transition(gclog_or_tty,
2088                                start_used_bytes,
2089                                g1h->used(),
2090                                g1h->capacity());
2091   }
2092 
2093   // Clean up will have freed any regions completely full of garbage.
2094   // Update the soft reference policy with the new heap occupancy.
2095   Universe::update_heap_info_at_gc();
2096 
2097   // We need to make this be a "collection" so any collection pause that
2098   // races with it goes around and waits for completeCleanup to finish.
2099   g1h->increment_total_collections();
2100 
2101   // We reclaimed old regions so we should calculate the sizes to make
2102   // sure we update the old gen/space data.
2103   g1h->g1mm()->update_sizes();
2104 
2105   if (VerifyDuringGC) {
2106     HandleMark hm;  // handle scope
2107     gclog_or_tty->print(" VerifyDuringGC:(after)");
2108     Universe::heap()->prepare_for_verify();
2109     Universe::verify(/* silent */ false,
2110                      /* option */ VerifyOption_G1UsePrevMarking);
2111   }
2112 
2113   g1h->verify_region_sets_optional();
2114 }
2115 
2116 void ConcurrentMark::completeCleanup() {
2117   if (has_aborted()) return;
2118 
2119   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2120 
2121   _cleanup_list.verify_optional();
2122   FreeRegionList tmp_free_list("Tmp Free List");
2123 
2124   if (G1ConcRegionFreeingVerbose) {
2125     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2126                            "cleanup list has %u entries",
2127                            _cleanup_list.length());
2128   }
2129 
  // No one else should be accessing the _cleanup_list at this point,
  // so it's not necessary to take any locks.
2132   while (!_cleanup_list.is_empty()) {
2133     HeapRegion* hr = _cleanup_list.remove_head();
2134     assert(hr != NULL, "the list was not empty");
2135     hr->par_clear();
2136     tmp_free_list.add_as_tail(hr);
2137 
2138     // Instead of adding one region at a time to the secondary_free_list,
2139     // we accumulate them in the local list and move them a few at a
2140     // time. This also cuts down on the number of notify_all() calls
2141     // we do during this process. We'll also append the local list when
2142     // _cleanup_list is empty (which means we just removed the last
2143     // region from the _cleanup_list).
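    // For example, with G1SecondaryFreeListAppendLength == 5 the local
    // list is flushed after every 5th region has been cleared, and one
    // final time when the cleanup list runs dry.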
2144     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2145         _cleanup_list.is_empty()) {
2146       if (G1ConcRegionFreeingVerbose) {
2147         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2148                                "appending %u entries to the secondary_free_list, "
2149                                "cleanup list still has %u entries",
2150                                tmp_free_list.length(),
2151                                _cleanup_list.length());
2152       }
2153 
2154       {
2155         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2156         g1h->secondary_free_list_add_as_tail(&tmp_free_list);
2157         SecondaryFreeList_lock->notify_all();
2158       }
2159 
2160       if (G1StressConcRegionFreeing) {
2161         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2162           os::sleep(Thread::current(), (jlong) 1, false);
2163         }
2164       }
2165     }
2166   }
2167   assert(tmp_free_list.is_empty(), "post-condition");
2168 }
2169 
2170 // Supporting Object and Oop closures for reference discovery
// and processing during marking
2172 
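// An object is judged alive here if it is non-NULL and either lies
// outside the G1 reserved heap or is not "ill", i.e. it is still
// considered live with respect to the current marking information.
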
2173 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2174   HeapWord* addr = (HeapWord*)obj;
2175   return addr != NULL &&
2176          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2177 }
2178 
// 'Keep Alive' oop closure used by both serial and parallel reference
// processing. Uses the CMTask associated with a worker thread (for serial
// reference processing the CMTask for worker 0 is used) to preserve (mark)
// and trace referent objects.
2183 //
// Using the CMTask and its embedded local queues avoids having the worker
// threads operate on the global mark stack. This reduces the risk of
// overflowing the stack - which we would rather avoid at this late
// stage. Using the tasks' local queues also removes the potential for
// the workers to interfere with each other, which could occur if they
// operated on the global stack.
2190 
2191 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2192   ConcurrentMark*  _cm;
2193   CMTask*          _task;
2194   int              _ref_counter_limit;
2195   int              _ref_counter;
2196  public:
2197   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) :
2198     _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval) {
2199     assert(_ref_counter_limit > 0, "sanity");
2200     _ref_counter = _ref_counter_limit;
2201   }
2202 
2203   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2204   virtual void do_oop(      oop* p) { do_oop_work(p); }
2205 
2206   template <class T> void do_oop_work(T* p) {
2207     if (!_cm->has_overflown()) {
2208       oop obj = oopDesc::load_decode_heap_oop(p);
2209       if (_cm->verbose_high()) {
2210         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2211                                "*"PTR_FORMAT" = "PTR_FORMAT,
2212                                _task->worker_id(), p, (void*) obj);
2213       }
2214 
2215       _task->deal_with_reference(obj);
2216       _ref_counter--;
2217 
2218       if (_ref_counter == 0) {
2219         // We have dealt with _ref_counter_limit references, pushing them
2220         // and objects reachable from them on to the local stack (and
2221         // possibly the global stack). Call CMTask::do_marking_step() to
2222         // process these entries.
2223         //
2224         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2225         // there's nothing more to do (i.e. we're done with the entries that
2226         // were pushed as a result of the CMTask::deal_with_reference() calls
2227         // above) or we overflow.
2228         //
2229         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2230         // flag while there may still be some work to do. (See the comment at
2231         // the beginning of CMTask::do_marking_step() for those conditions -
2232         // one of which is reaching the specified time target.) It is only
2233         // when CMTask::do_marking_step() returns without setting the
2234         // has_aborted() flag that the marking step has completed.
2235         do {
2236           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2237           _task->do_marking_step(mark_step_duration_ms,
2238                                  false /* do_stealing    */,
2239                                  false /* do_termination */);
2240         } while (_task->has_aborted() && !_cm->has_overflown());
2241         _ref_counter = _ref_counter_limit;
2242       }
2243     } else {
2244       if (_cm->verbose_high()) {
2245          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2246       }
2247     }
2248   }
2249 };
2250 
2251 // 'Drain' oop closure used by both serial and parallel reference processing.
2252 // Uses the CMTask associated with a given worker thread (for serial
// reference processing the CMTask for worker 0 is used). Calls the
2254 // do_marking_step routine, with an unbelievably large timeout value,
2255 // to drain the marking data structures of the remaining entries
2256 // added by the 'keep alive' oop closure above.
2257 
2258 class G1CMDrainMarkingStackClosure: public VoidClosure {
2259   ConcurrentMark* _cm;
2260   CMTask*         _task;
2261   bool            _do_stealing;
2262   bool            _do_termination;
2263  public:
2264   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_par) :
2265     _cm(cm), _task(task) {
2266     assert(is_par || _task->worker_id() == 0,
2267            "Only task for worker 0 should be used if ref processing is single threaded");
2268     // We only allow stealing and only enter the termination protocol
2269     // in CMTask::do_marking_step() if this closure is being instantiated
2270     // for parallel reference processing.
2271     _do_stealing = _do_termination = is_par;
2272   }
2273 
2274   void do_void() {
2275     do {
2276       if (_cm->verbose_high()) {
2277         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - "
2278                                "stealing: %s, termination: %s",
2279                                _task->worker_id(),
2280                                BOOL_TO_STR(_do_stealing),
2281                                BOOL_TO_STR(_do_termination));
2282       }
2283 
2284       // We call CMTask::do_marking_step() to completely drain the local
2285       // and global marking stacks of entries pushed by the 'keep alive'
2286       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2287       //
2288       // CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
2291       // closure to the entries on the discovered ref lists) or we overflow
2292       // the global marking stack.
2293       //
2294       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2295       // flag while there may still be some work to do. (See the comment at
2296       // the beginning of CMTask::do_marking_step() for those conditions -
2297       // one of which is reaching the specified time target.) It is only
2298       // when CMTask::do_marking_step() returns without setting the
2299       // has_aborted() flag that the marking step has completed.
2300 
2301       _task->do_marking_step(1000000000.0 /* something very large */,
2302                              _do_stealing,
2303                              _do_termination);
2304     } while (_task->has_aborted() && !_cm->has_overflown());
2305   }
2306 };
2307 
2308 // Implementation of AbstractRefProcTaskExecutor for parallel
2309 // reference processing at the end of G1 concurrent marking
2310 
2311 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2312 private:
2313   G1CollectedHeap* _g1h;
2314   ConcurrentMark*  _cm;
2315   WorkGang*        _workers;
2316   int              _active_workers;
2317 
2318 public:
2319   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2320                         ConcurrentMark* cm,
2321                         WorkGang* workers,
2322                         int n_workers) :
2323     _g1h(g1h), _cm(cm),
2324     _workers(workers), _active_workers(n_workers) { }
2325 
2326   // Executes the given task using concurrent marking worker threads.
2327   virtual void execute(ProcessTask& task);
2328   virtual void execute(EnqueueTask& task);
2329 };
2330 
2331 class G1CMRefProcTaskProxy: public AbstractGangTask {
2332   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2333   ProcessTask&     _proc_task;
2334   G1CollectedHeap* _g1h;
2335   ConcurrentMark*  _cm;
2336   bool             _processing_is_mt;
2337 
2338 public:
2339   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2340                      G1CollectedHeap* g1h,
2341                      ConcurrentMark* cm) :
2342     AbstractGangTask("Process reference objects in parallel"),
2343     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2344       ReferenceProcessor* rp = _g1h->ref_processor_cm();
2345       _processing_is_mt = rp->processing_is_mt();
2346     }
2347 
2348   virtual void work(uint worker_id) {
2349     CMTask* marking_task = _cm->task(worker_id);
2350     G1CMIsAliveClosure g1_is_alive(_g1h);
2351     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
2352     G1CMDrainMarkingStackClosure g1_par_drain(_cm, marking_task, _processing_is_mt);
2353 
2354     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2355   }
2356 };
2357 
2358 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2359   assert(_workers != NULL, "Need parallel worker threads.");
2360   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2361 
2362   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2363 
2364   // We need to reset the phase for each task execution so that
2365   // the termination protocol of CMTask::do_marking_step works.
2366   _cm->set_phase(_active_workers, false /* concurrent */);
2367   _g1h->set_par_threads(_active_workers);
2368   _workers->run_task(&proc_task_proxy);
2369   _g1h->set_par_threads(0);
2370 }
2371 
2372 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2373   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2374   EnqueueTask& _enq_task;
2375 
2376 public:
2377   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2378     AbstractGangTask("Enqueue reference objects in parallel"),
2379     _enq_task(enq_task) { }
2380 
2381   virtual void work(uint worker_id) {
2382     _enq_task.work(worker_id);
2383   }
2384 };
2385 
2386 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2387   assert(_workers != NULL, "Need parallel worker threads.");
2388   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2389 
2390   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2391 
2392   _g1h->set_par_threads(_active_workers);
2393   _workers->run_task(&enq_task_proxy);
2394   _g1h->set_par_threads(0);
2395 }
2396 
2397 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2398   ResourceMark rm;
2399   HandleMark   hm;
2400 
2401   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2402 
2403   // Is alive closure.
2404   G1CMIsAliveClosure g1_is_alive(g1h);
2405 
2406   // Inner scope to exclude the cleaning of the string and symbol
2407   // tables from the displayed time.
2408   {
2409     if (G1Log::finer()) {
2410       gclog_or_tty->put(' ');
2411     }
2412     TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
2413 
2414     ReferenceProcessor* rp = g1h->ref_processor_cm();
2415 
2416     // See the comment in G1CollectedHeap::ref_processing_init()
2417     // about how reference processing currently works in G1.
2418 
2419     // Set the soft reference policy
2420     rp->setup_policy(clear_all_soft_refs);
2421     assert(_markStack.isEmpty(), "mark stack should be empty");
2422 
2423     // Non-MT instances 'Keep Alive' and 'Complete GC' oop closures.
2424     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0));
2425     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), false);
2426 
2427     // We need at least one active thread. If reference processing is
2428     // not multi-threaded we use the current (ConcurrentMarkThread) thread,
2429     // otherwise we use the work gang from the G1CollectedHeap and we
2430     // utilize all the worker threads we can.
2431     uint active_workers = (rp->processing_is_mt() && g1h->workers() != NULL
2432                                 ? g1h->workers()->active_workers()
2433                                 : 1U);
2434 
2435     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
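    // For example (hypothetical numbers): if the work gang reports 13
    // active workers but _max_worker_id is 8, we clamp down to 8; a
    // degenerate value of 0 is bumped up to 1 so that at least one
    // CMTask is available.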
2436 
2437     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2438                                               g1h->workers(), active_workers);
2439 
2440     AbstractRefProcTaskExecutor* executor = (rp->processing_is_mt()
2441                                                 ? &par_task_executor
2442                                                 : NULL);
2443 
2444     // Set the degree of MT processing here.  If the discovery was done MT,
2445     // the number of threads involved during discovery could differ from
2446     // the number of active workers.  This is OK as long as the discovered
2447     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2448     rp->set_active_mt_degree(active_workers);
2449 
2450     // Process the weak references.
2451     rp->process_discovered_references(&g1_is_alive,
2452                                       &g1_keep_alive,
2453                                       &g1_drain_mark_stack,
2454                                       executor);
2455 
2456     // The do_oop work routines of the keep_alive and drain_marking_stack
2457     // oop closures will set the has_overflown flag if we overflow the
2458     // global marking stack.
2459 
2460     assert(_markStack.overflow() || _markStack.isEmpty(),
2461             "mark stack should be empty (unless it overflowed)");
2462     if (_markStack.overflow()) {
2463       // This should have been done already when we tried to push an
2464       // entry on to the global mark stack. But let's do it again.
2465       set_has_overflown();
2466     }
2467 
2468     assert(rp->num_q() == active_workers, "why not");
2469 
2470     rp->enqueue_discovered_references(executor);
2471 
2472     rp->verify_no_references_recorded();
2473     assert(!rp->discovery_enabled(), "Post condition");
2474   }
2475 
2476   // Now clean up stale oops in StringTable
2477   StringTable::unlink(&g1_is_alive);
2478   // Clean up unreferenced symbols in symbol table.
2479   SymbolTable::unlink();
2480 }
2481 
2482 void ConcurrentMark::swapMarkBitMaps() {
2483   CMBitMapRO* temp = _prevMarkBitMap;
2484   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2485   _nextMarkBitMap  = (CMBitMap*)  temp;
2486 }
2487 
2488 class CMRemarkTask: public AbstractGangTask {
2489 private:
2490   ConcurrentMark *_cm;
2491 
2492 public:
2493   void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
2496     if (worker_id < _cm->active_tasks()) {
2497       CMTask* task = _cm->task(worker_id);
2498       task->record_start_time();
2499       do {
2500         task->do_marking_step(1000000000.0 /* something very large */,
2501                               true /* do_stealing    */,
2502                               true /* do_termination */);
2503       } while (task->has_aborted() && !_cm->has_overflown());
2504       // If we overflow, then we do not want to restart. We instead
2505       // want to abort remark and do concurrent marking again.
2506       task->record_end_time();
2507     }
2508   }
2509 
2510   CMRemarkTask(ConcurrentMark* cm, int active_workers) :
2511     AbstractGangTask("Par Remark"), _cm(cm) {
2512     _cm->terminator()->reset_for_reuse(active_workers);
2513   }
2514 };
2515 
2516 void ConcurrentMark::checkpointRootsFinalWork() {
2517   ResourceMark rm;
2518   HandleMark   hm;
2519   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2520 
2521   g1h->ensure_parsability(false);
2522 
2523   if (G1CollectedHeap::use_parallel_gc_threads()) {
2524     G1CollectedHeap::StrongRootsScope srs(g1h);
2525     // this is remark, so we'll use up all active threads
2526     uint active_workers = g1h->workers()->active_workers();
2527     if (active_workers == 0) {
2528       assert(active_workers > 0, "Should have been set earlier");
2529       active_workers = (uint) ParallelGCThreads;
2530       g1h->workers()->set_active_workers(active_workers);
2531     }
2532     set_phase(active_workers, false /* concurrent */);
    // Leave _parallel_marking_threads at its
2534     // value originally calculated in the ConcurrentMark
2535     // constructor and pass values of the active workers
2536     // through the gang in the task.
2537 
2538     CMRemarkTask remarkTask(this, active_workers);
2539     g1h->set_par_threads(active_workers);
2540     g1h->workers()->run_task(&remarkTask);
2541     g1h->set_par_threads(0);
2542   } else {
2543     G1CollectedHeap::StrongRootsScope srs(g1h);
2544     // this is remark, so we'll use up all available threads
2545     uint active_workers = 1;
2546     set_phase(active_workers, false /* concurrent */);
2547 
2548     CMRemarkTask remarkTask(this, active_workers);
    // We will start all available threads, even if we decide that the
    // number of active workers will be fewer. The extra ones will just
    // bail out immediately.
2552     remarkTask.work(0);
2553   }
2554   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2555   guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant");
2556 
2557   print_stats();
2558 
2559 #if VERIFY_OBJS_PROCESSED
2560   if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) {
2561     gclog_or_tty->print_cr("Processed = %d, enqueued = %d.",
2562                            _scan_obj_cl.objs_processed,
2563                            ThreadLocalObjQueue::objs_enqueued);
2564     guarantee(_scan_obj_cl.objs_processed ==
2565               ThreadLocalObjQueue::objs_enqueued,
2566               "Different number of objs processed and enqueued.");
2567   }
2568 #endif
2569 }
2570 
2571 #ifndef PRODUCT
2572 
2573 class PrintReachableOopClosure: public OopClosure {
2574 private:
2575   G1CollectedHeap* _g1h;
2576   outputStream*    _out;
2577   VerifyOption     _vo;
2578   bool             _all;
2579 
2580 public:
2581   PrintReachableOopClosure(outputStream* out,
2582                            VerifyOption  vo,
2583                            bool          all) :
2584     _g1h(G1CollectedHeap::heap()),
2585     _out(out), _vo(vo), _all(all) { }
2586 
2587   void do_oop(narrowOop* p) { do_oop_work(p); }
2588   void do_oop(      oop* p) { do_oop_work(p); }
2589 
2590   template <class T> void do_oop_work(T* p) {
2591     oop         obj = oopDesc::load_decode_heap_oop(p);
2592     const char* str = NULL;
2593     const char* str2 = "";
2594 
2595     if (obj == NULL) {
2596       str = "";
2597     } else if (!_g1h->is_in_g1_reserved(obj)) {
2598       str = " O";
2599     } else {
2600       HeapRegion* hr  = _g1h->heap_region_containing(obj);
2601       guarantee(hr != NULL, "invariant");
2602       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2603       bool marked = _g1h->is_marked(obj, _vo);
2604 
2605       if (over_tams) {
2606         str = " >";
2607         if (marked) {
2608           str2 = " AND MARKED";
2609         }
2610       } else if (marked) {
2611         str = " M";
2612       } else {
2613         str = " NOT";
2614       }
2615     }
2616 
2617     _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
2618                    p, (void*) obj, str, str2);
2619   }
2620 };
2621 
2622 class PrintReachableObjectClosure : public ObjectClosure {
2623 private:
2624   G1CollectedHeap* _g1h;
2625   outputStream*    _out;
2626   VerifyOption     _vo;
2627   bool             _all;
2628   HeapRegion*      _hr;
2629 
2630 public:
2631   PrintReachableObjectClosure(outputStream* out,
2632                               VerifyOption  vo,
2633                               bool          all,
2634                               HeapRegion*   hr) :
2635     _g1h(G1CollectedHeap::heap()),
2636     _out(out), _vo(vo), _all(all), _hr(hr) { }
2637 
2638   void do_object(oop o) {
2639     bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2640     bool marked = _g1h->is_marked(o, _vo);
2641     bool print_it = _all || over_tams || marked;
2642 
2643     if (print_it) {
2644       _out->print_cr(" "PTR_FORMAT"%s",
2645                      o, (over_tams) ? " >" : (marked) ? " M" : "");
2646       PrintReachableOopClosure oopCl(_out, _vo, _all);
2647       o->oop_iterate_no_header(&oopCl);
2648     }
2649   }
2650 };
2651 
2652 class PrintReachableRegionClosure : public HeapRegionClosure {
2653 private:
2654   G1CollectedHeap* _g1h;
2655   outputStream*    _out;
2656   VerifyOption     _vo;
2657   bool             _all;
2658 
2659 public:
2660   bool doHeapRegion(HeapRegion* hr) {
2661     HeapWord* b = hr->bottom();
2662     HeapWord* e = hr->end();
2663     HeapWord* t = hr->top();
2664     HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2665     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2666                    "TAMS: "PTR_FORMAT, b, e, t, p);
2667     _out->cr();
2668 
2669     HeapWord* from = b;
2670     HeapWord* to   = t;
2671 
2672     if (to > from) {
2673       _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
2674       _out->cr();
2675       PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2676       hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2677       _out->cr();
2678     }
2679 
2680     return false;
2681   }
2682 
2683   PrintReachableRegionClosure(outputStream* out,
2684                               VerifyOption  vo,
2685                               bool          all) :
2686     _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2687 };
2688 
2689 void ConcurrentMark::print_reachable(const char* str,
2690                                      VerifyOption vo,
2691                                      bool all) {
2692   gclog_or_tty->cr();
2693   gclog_or_tty->print_cr("== Doing heap dump... ");
2694 
2695   if (G1PrintReachableBaseFile == NULL) {
2696     gclog_or_tty->print_cr("  #### error: no base file defined");
2697     return;
2698   }
2699 
2700   if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2701       (JVM_MAXPATHLEN - 1)) {
2702     gclog_or_tty->print_cr("  #### error: file name too long");
2703     return;
2704   }
2705 
2706   char file_name[JVM_MAXPATHLEN];
2707   sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2708   gclog_or_tty->print_cr("  dumping to file %s", file_name);
2709 
2710   fileStream fout(file_name);
2711   if (!fout.is_open()) {
2712     gclog_or_tty->print_cr("  #### error: could not open file");
2713     return;
2714   }
2715 
2716   outputStream* out = &fout;
2717   out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2718   out->cr();
2719 
2720   out->print_cr("--- ITERATING OVER REGIONS");
2721   out->cr();
2722   PrintReachableRegionClosure rcl(out, vo, all);
2723   _g1h->heap_region_iterate(&rcl);
2724   out->cr();
2725 
2726   gclog_or_tty->print_cr("  done");
2727   gclog_or_tty->flush();
2728 }
2729 
2730 #endif // PRODUCT
2731 
2732 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2733   // Note we are overriding the read-only view of the prev map here, via
2734   // the cast.
2735   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2736 }
2737 
2738 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2739   _nextMarkBitMap->clearRange(mr);
2740 }
2741 
2742 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
2743   clearRangePrevBitmap(mr);
2744   clearRangeNextBitmap(mr);
2745 }
2746 
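// Claim the next region to be scanned. We repeatedly read the global
// finger and try to CAS it forward to the end of the region it points
// into. On success the region is ours: we return it if it has anything
// to scan (its next_top_at_mark_start() is above bottom()), otherwise
// we return NULL and the caller is expected to call claim_region()
// again. On CAS failure another worker moved the finger, so we re-read
// it and retry. NULL is also returned once the finger reaches
// _heap_end, i.e. when there are no regions left to claim.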
2747 HeapRegion*
2748 ConcurrentMark::claim_region(uint worker_id) {
2749   // "checkpoint" the finger
2750   HeapWord* finger = _finger;
2751 
2752   // _heap_end will not change underneath our feet; it only changes at
2753   // yield points.
2754   while (finger < _heap_end) {
2755     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2756 
2757     // Note on how this code handles humongous regions. In the
2758     // normal case the finger will reach the start of a "starts
2759     // humongous" (SH) region. Its end will either be the end of the
2760     // last "continues humongous" (CH) region in the sequence, or the
2761     // standard end of the SH region (if the SH is the only region in
2762     // the sequence). That way claim_region() will skip over the CH
2763     // regions. However, there is a subtle race between a CM thread
2764     // executing this method and a mutator thread doing a humongous
2765     // object allocation. The two are not mutually exclusive as the CM
2766     // thread does not need to hold the Heap_lock when it gets
2767     // here. So there is a chance that claim_region() will come across
    // a free region that's in the process of becoming a SH or a CH
2769     // region. In the former case, it will either
2770     //   a) Miss the update to the region's end, in which case it will
2771     //      visit every subsequent CH region, will find their bitmaps
2772     //      empty, and do nothing, or
2773     //   b) Will observe the update of the region's end (in which case
2774     //      it will skip the subsequent CH regions).
2775     // If it comes across a region that suddenly becomes CH, the
2776     // scenario will be similar to b). So, the race between
2777     // claim_region() and a humongous object allocation might force us
2778     // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
2780     HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
2781     HeapWord*   bottom        = curr_region->bottom();
2782     HeapWord*   end           = curr_region->end();
2783     HeapWord*   limit         = curr_region->next_top_at_mark_start();
2784 
2785     if (verbose_low()) {
2786       gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2787                              "["PTR_FORMAT", "PTR_FORMAT"), "
2788                              "limit = "PTR_FORMAT,
2789                              worker_id, curr_region, bottom, end, limit);
2790     }
2791 
2792     // Is the gap between reading the finger and doing the CAS too long?
2793     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2794     if (res == finger) {
2795       // we succeeded
2796 
2797       // notice that _finger == end cannot be guaranteed here since
2798       // someone else might have moved the finger even further.
2799       assert(_finger >= end, "the finger should have moved forward");
2800 
2801       if (verbose_low()) {
2802         gclog_or_tty->print_cr("[%u] we were successful with region = "
2803                                PTR_FORMAT, worker_id, curr_region);
2804       }
2805 
2806       if (limit > bottom) {
2807         if (verbose_low()) {
2808           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2809                                  "returning it ", worker_id, curr_region);
2810         }
2811         return curr_region;
2812       } else {
2813         assert(limit == bottom,
2814                "the region limit should be at bottom");
2815         if (verbose_low()) {
2816           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2817                                  "returning NULL", worker_id, curr_region);
2818         }
2819         // we return NULL and the caller should try calling
2820         // claim_region() again.
2821         return NULL;
2822       }
2823     } else {
2824       assert(_finger > finger, "the finger should have moved forward");
2825       if (verbose_low()) {
2826         gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2827                                "global finger = "PTR_FORMAT", "
2828                                "our finger = "PTR_FORMAT,
2829                                worker_id, _finger, finger);
2830       }
2831 
2832       // read it again
2833       finger = _finger;
2834     }
2835   }
2836 
2837   return NULL;
2838 }
2839 
2840 #ifndef PRODUCT
2841 enum VerifyNoCSetOopsPhase {
2842   VerifyNoCSetOopsStack,
2843   VerifyNoCSetOopsQueues,
2844   VerifyNoCSetOopsSATBCompleted,
2845   VerifyNoCSetOopsSATBThread
2846 };
2847 
2848 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
2849 private:
2850   G1CollectedHeap* _g1h;
2851   VerifyNoCSetOopsPhase _phase;
2852   int _info;
2853 
2854   const char* phase_str() {
2855     switch (_phase) {
2856     case VerifyNoCSetOopsStack:         return "Stack";
2857     case VerifyNoCSetOopsQueues:        return "Queue";
2858     case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
2859     case VerifyNoCSetOopsSATBThread:    return "Thread SATB Buffers";
2860     default:                            ShouldNotReachHere();
2861     }
2862     return NULL;
2863   }
2864 
2865   void do_object_work(oop obj) {
2866     guarantee(!_g1h->obj_in_cs(obj),
2867               err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
2868                       (void*) obj, phase_str(), _info));
2869   }
2870 
2871 public:
2872   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2873 
2874   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
2875     _phase = phase;
2876     _info = info;
2877   }
2878 
2879   virtual void do_oop(oop* p) {
2880     oop obj = oopDesc::load_decode_heap_oop(p);
2881     do_object_work(obj);
2882   }
2883 
2884   virtual void do_oop(narrowOop* p) {
2885     // We should not come across narrow oops while scanning marking
2886     // stacks and SATB buffers.
2887     ShouldNotReachHere();
2888   }
2889 
2890   virtual void do_object(oop obj) {
2891     do_object_work(obj);
2892   }
2893 };
2894 
2895 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
2896                                          bool verify_enqueued_buffers,
2897                                          bool verify_thread_buffers,
2898                                          bool verify_fingers) {
2899   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2900   if (!G1CollectedHeap::heap()->mark_in_progress()) {
2901     return;
2902   }
2903 
2904   VerifyNoCSetOopsClosure cl;
2905 
2906   if (verify_stacks) {
2907     // Verify entries on the global mark stack
2908     cl.set_phase(VerifyNoCSetOopsStack);
2909     _markStack.oops_do(&cl);
2910 
2911     // Verify entries on the task queues
2912     for (uint i = 0; i < _max_worker_id; i += 1) {
2913       cl.set_phase(VerifyNoCSetOopsQueues, i);
2914       CMTaskQueue* queue = _task_queues->queue(i);
2915       queue->oops_do(&cl);
2916     }
2917   }
2918 
2919   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
2920 
2921   // Verify entries on the enqueued SATB buffers
2922   if (verify_enqueued_buffers) {
2923     cl.set_phase(VerifyNoCSetOopsSATBCompleted);
2924     satb_qs.iterate_completed_buffers_read_only(&cl);
2925   }
2926 
2927   // Verify entries on the per-thread SATB buffers
2928   if (verify_thread_buffers) {
2929     cl.set_phase(VerifyNoCSetOopsSATBThread);
2930     satb_qs.iterate_thread_buffers_read_only(&cl);
2931   }
2932 
2933   if (verify_fingers) {
2934     // Verify the global finger
2935     HeapWord* global_finger = finger();
2936     if (global_finger != NULL && global_finger < _heap_end) {
2937       // The global finger always points to a heap region boundary. We
2938       // use heap_region_containing_raw() to get the containing region
2939       // given that the global finger could be pointing to a free region
2940       // which subsequently becomes a continues humongous region. If
2941       // that happens, heap_region_containing() would return the starts
2942       // humongous region, whose bottom differs from the finger, and the
2943       // check below would not hold any more.
2944       HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2945       guarantee(global_finger == global_hr->bottom(),
2946                 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
2947                         global_finger, HR_FORMAT_PARAMS(global_hr)));
2948     }
2949 
2950     // Verify the task fingers
2951     assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2952     for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
2953       CMTask* task = _tasks[i];
2954       HeapWord* task_finger = task->finger();
2955       if (task_finger != NULL && task_finger < _heap_end) {
2956         // See above note on the global finger verification.
2957         HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2958         guarantee(task_finger == task_hr->bottom() ||
2959                   !task_hr->in_collection_set(),
2960                   err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
2961                           task_finger, HR_FORMAT_PARAMS(task_hr)));
2962       }
2963     }
2964   }
2965 }
2966 #endif // PRODUCT
2967 
2968 // Aggregate the counting data that was constructed concurrently
2969 // with marking.
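     //
     // Conceptually (an illustrative sketch), for each region hr the
     // closure below computes:
     //
     //   hr->marked_bytes += sum over workers w of marked_bytes_array(w)[hr]
     //   global card bitmap |= union over workers w of task_card_bm(w),
     //                         restricted to the cards of [bottom, NTAMS)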
2970 class AggregateCountDataHRClosure: public HeapRegionClosure {
2971   G1CollectedHeap* _g1h;
2972   ConcurrentMark* _cm;
2973   CardTableModRefBS* _ct_bs;
2974   BitMap* _cm_card_bm;
2975   uint _max_worker_id;
2976 
2977  public:
2978   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2979                               BitMap* cm_card_bm,
2980                               uint max_worker_id) :
2981     _g1h(g1h), _cm(g1h->concurrent_mark()),
2982     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
2983     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2984 
2985   bool doHeapRegion(HeapRegion* hr) {
2986     if (hr->continuesHumongous()) {
2987       // We will ignore these here and process them when their
2988       // associated "starts humongous" region is processed.
2989       // Note that we cannot rely on their associated
2990       // "starts humongous" region to have their bit set to 1
2991       // since, due to the region chunking in the parallel region
2992       // iteration, a "continues humongous" region might be visited
2993       // before its associated "starts humongous".
2994       return false;
2995     }
2996 
2997     HeapWord* start = hr->bottom();
2998     HeapWord* limit = hr->next_top_at_mark_start();
2999     HeapWord* end = hr->end();
3000 
3001     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3002            err_msg("Preconditions not met - "
3003                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3004                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
3005                    start, limit, hr->top(), hr->end()));
3006 
3007     assert(hr->next_marked_bytes() == 0, "Precondition");
3008 
3009     if (start == limit) {
3010       // NTAMS of this region has not been set so nothing to do.
3011       return false;
3012     }
3013 
3014     // 'start' should be in the heap.
3015     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3016     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3017     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3018 
3019     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3020     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3021     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3022 
3023     // If NTAMS is not card aligned then we bump the card bitmap index
3024     // for limit so that we get all the cards spanned by
3025     // the object ending at NTAMS.
3026     // Note: if this is the last region in the heap then NTAMS
3027     // could actually be just beyond the end of the heap;
3028     // limit_idx will then correspond to a (non-existent) card
3029     // that is also outside the heap.
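         // For example (an illustrative sketch): if limit falls part-way
         // into card N, card_bitmap_index_for(limit) returns N, but the
         // object ending at NTAMS also spans into card N, so we bump
         // limit_idx to N + 1 so that the aggregation below visits card N.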
3030     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3031       limit_idx += 1;
3032     }
3033 
3034     assert(limit_idx <= end_idx, "or else use atomics");
3035 
3036     // Aggregate the "stripe" in the count data associated with hr.
3037     uint hrs_index = hr->hrs_index();
3038     size_t marked_bytes = 0;
3039 
3040     for (uint i = 0; i < _max_worker_id; i += 1) {
3041       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3042       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3043 
3044       // Fetch the marked_bytes in this region for task i and
3045       // add it to the running total for this region.
3046       marked_bytes += marked_bytes_array[hrs_index];
3047 
3048       // Now union this task's card bitmap, over [start_idx, limit_idx),
3049       // into the global card bitmap.
3050       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3051 
3052       while (scan_idx < limit_idx) {
3053         assert(task_card_bm->at(scan_idx) == true, "should be");
3054         _cm_card_bm->set_bit(scan_idx);
3055         assert(_cm_card_bm->at(scan_idx) == true, "should be");
3056 
3057         // BitMap::get_next_one_offset() can handle the case when
3058         // its left_offset parameter is greater than its right_offset
3059         // parameter. It does, however, have an early exit if
3060         // left_offset == right_offset. So let's limit the value
3061         // passed in for left offset here.
3062         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3063         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3064       }
3065     }
3066 
3067     // Update the marked bytes for this region.
3068     hr->add_to_marked_bytes(marked_bytes);
3069 
3070     // Next heap region
3071     return false;
3072   }
3073 };
3074 
3075 class G1AggregateCountDataTask: public AbstractGangTask {
3076 protected:
3077   G1CollectedHeap* _g1h;
3078   ConcurrentMark* _cm;
3079   BitMap* _cm_card_bm;
3080   uint _max_worker_id;
3081   int _active_workers;
3082 
3083 public:
3084   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3085                            ConcurrentMark* cm,
3086                            BitMap* cm_card_bm,
3087                            uint max_worker_id,
3088                            int n_workers) :
3089     AbstractGangTask("Count Aggregation"),
3090     _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3091     _max_worker_id(max_worker_id),
3092     _active_workers(n_workers) { }
3093 
3094   void work(uint worker_id) {
3095     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3096 
3097     if (G1CollectedHeap::use_parallel_gc_threads()) {
3098       _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
3099                                             _active_workers,
3100                                             HeapRegion::AggregateCountClaimValue);
3101     } else {
3102       _g1h->heap_region_iterate(&cl);
3103     }
3104   }
3105 };
3106 
3107 
3108 void ConcurrentMark::aggregate_count_data() {
3109   int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3110                         _g1h->workers()->active_workers() :
3111                         1);
3112 
3113   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3114                                            _max_worker_id, n_workers);
3115 
3116   if (G1CollectedHeap::use_parallel_gc_threads()) {
3117     assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3118            "sanity check");
3119     _g1h->set_par_threads(n_workers);
3120     _g1h->workers()->run_task(&g1_par_agg_task);
3121     _g1h->set_par_threads(0);
3122 
3123     assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
3124            "sanity check");
3125     _g1h->reset_heap_region_claim_values();
3126   } else {
3127     g1_par_agg_task.work(0);
3128   }
3129 }
3130 
3131 // Clear the per-worker arrays used to store the per-region counting data
3132 void ConcurrentMark::clear_all_count_data() {
3133   // Clear the global card bitmap - it will be filled during
3134   // liveness count aggregation (during remark) and the
3135   // final counting task.
3136   _card_bm.clear();
3137 
3138   // Clear the global region bitmap - it will be filled as part
3139   // of the final counting task.
3140   _region_bm.clear();
3141 
3142   uint max_regions = _g1h->max_regions();
3143   assert(_max_worker_id > 0, "uninitialized");
3144 
3145   for (uint i = 0; i < _max_worker_id; i += 1) {
3146     BitMap* task_card_bm = count_card_bitmap_for(i);
3147     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3148 
3149     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3150     assert(marked_bytes_array != NULL, "uninitialized");
3151 
3152     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3153     task_card_bm->clear();
3154   }
3155 }
3156 
3157 void ConcurrentMark::print_stats() {
3158   if (verbose_stats()) {
3159     gclog_or_tty->print_cr("---------------------------------------------------------------------");
3160     for (size_t i = 0; i < _active_tasks; ++i) {
3161       _tasks[i]->print_stats();
3162       gclog_or_tty->print_cr("---------------------------------------------------------------------");
3163     }
3164   }
3165 }
3166 
3167 // abandon current marking iteration due to a Full GC
3168 void ConcurrentMark::abort() {
3169   // Clear all marks to force the marking thread to do nothing
3170   _nextMarkBitMap->clearAll();
3171   // Clear the liveness counting data
3172   clear_all_count_data();
3173   // Empty mark stack
3174   reset_marking_state();
3175   for (uint i = 0; i < _max_worker_id; ++i) {
3176     _tasks[i]->clear_region_fields();
3177   }
3178   _has_aborted = true;
3179 
3180   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3181   satb_mq_set.abandon_partial_marking();
3182   // This can be called either during or outside marking; we'll read
3183   // the expected_active value from the SATB queue set.
3184   satb_mq_set.set_active_all_threads(
3185                                  false, /* new active value */
3186                                  satb_mq_set.is_active() /* expected_active */);
3187 }
3188 
3189 static void print_ms_time_info(const char* prefix, const char* name,
3190                                NumberSeq& ns) {
3191   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3192                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3193   if (ns.num() > 0) {
3194     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3195                            prefix, ns.sd(), ns.maximum());
3196   }
3197 }
3198 
3199 void ConcurrentMark::print_summary_info() {
3200   gclog_or_tty->print_cr(" Concurrent marking:");
3201   print_ms_time_info("  ", "init marks", _init_times);
3202   print_ms_time_info("  ", "remarks", _remark_times);
3203   {
3204     print_ms_time_info("     ", "final marks", _remark_mark_times);
3205     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3206 
3207   }
3208   print_ms_time_info("  ", "cleanups", _cleanup_times);
3209   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
3210                          _total_counting_time,
3211                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3212                           (double)_cleanup_times.num()
3213                          : 0.0));
3214   if (G1ScrubRemSets) {
3215     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3216                            _total_rs_scrub_time,
3217                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3218                             (double)_cleanup_times.num()
3219                            : 0.0));
3220   }
3221   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3222                          (_init_times.sum() + _remark_times.sum() +
3223                           _cleanup_times.sum())/1000.0);
3224   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3225                 "(%8.2f s marking).",
3226                 cmThread()->vtime_accum(),
3227                 cmThread()->vtime_mark_accum());
3228 }
3229 
3230 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3231   if (use_parallel_marking_threads()) {
3232     _parallel_workers->print_worker_threads_on(st);
3233   }
3234 }
3235 
3236 // We take a break if someone is trying to stop the world.
3237 bool ConcurrentMark::do_yield_check(uint worker_id) {
3238   if (should_yield()) {
3239     if (worker_id == 0) {
3240       _g1h->g1_policy()->record_concurrent_pause();
3241     }
3242     cmThread()->yield();
3243     return true;
3244   } else {
3245     return false;
3246   }
3247 }
3248 
3249 bool ConcurrentMark::should_yield() {
3250   return cmThread()->should_yield();
3251 }
3252 
3253 bool ConcurrentMark::containing_card_is_marked(void* p) {
3254   size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
3255   return _card_bm.at(offset >> CardTableModRefBS::card_shift);
3256 }
3257 
3258 bool ConcurrentMark::containing_cards_are_marked(void* start,
3259                                                  void* last) {
3260   return containing_card_is_marked(start) &&
3261          containing_card_is_marked(last);
3262 }
3263 
3264 #ifndef PRODUCT
3265 // for debugging purposes
3266 void ConcurrentMark::print_finger() {
3267   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3268                          _heap_start, _heap_end, _finger);
3269   for (uint i = 0; i < _max_worker_id; ++i) {
3270     gclog_or_tty->print("   %u: "PTR_FORMAT, i, _tasks[i]->finger());
3271   }
3272   gclog_or_tty->print_cr("");
3273 }
3274 #endif
3275 
3276 void CMTask::scan_object(oop obj) {
3277   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3278 
3279   if (_cm->verbose_high()) {
3280     gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3281                            _worker_id, (void*) obj);
3282   }
3283 
3284   size_t obj_size = obj->size();
3285   _words_scanned += obj_size;
3286 
3287   obj->oop_iterate(_cm_oop_closure);
3288   statsOnly( ++_objs_scanned );
3289   check_limits();
3290 }
3291 
3292 // Closure for iteration over bitmaps
3293 class CMBitMapClosure : public BitMapClosure {
3294 private:
3295   // the bitmap that is being iterated over
3296   CMBitMap*                   _nextMarkBitMap;
3297   ConcurrentMark*             _cm;
3298   CMTask*                     _task;
3299 
3300 public:
3301   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3302     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
3303 
3304   bool do_bit(size_t offset) {
3305     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3306     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3307     assert( addr < _cm->finger(), "invariant");
3308 
3309     statsOnly( _task->increase_objs_found_on_bitmap() );
3310     assert(addr >= _task->finger(), "invariant");
3311 
3312     // We move this task's local finger along.
3313     _task->move_finger_to(addr);
3314 
3315     _task->scan_object(oop(addr));
3316     // we only partially drain the local queue and global stack
3317     _task->drain_local_queue(true);
3318     _task->drain_global_stack(true);
3319 
3320     // if the has_aborted flag has been raised, we need to bail out of
3321     // the iteration
3322     return !_task->has_aborted();
3323   }
3324 };
3325 
3326 // Closure for iterating over objects, currently only used for
3327 // processing SATB buffers.
3328 class CMObjectClosure : public ObjectClosure {
3329 private:
3330   CMTask* _task;
3331 
3332 public:
3333   void do_object(oop obj) {
3334     _task->deal_with_reference(obj);
3335   }
3336 
3337   CMObjectClosure(CMTask* task) : _task(task) { }
3338 };
3339 
3340 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3341                                ConcurrentMark* cm,
3342                                CMTask* task)
3343   : _g1h(g1h), _cm(cm), _task(task) {
3344   assert(_ref_processor == NULL, "should be initialized to NULL");
3345 
3346   if (G1UseConcMarkReferenceProcessing) {
3347     _ref_processor = g1h->ref_processor_cm();
3348     assert(_ref_processor != NULL, "should not be NULL");
3349   }
3350 }
3351 
3352 void CMTask::setup_for_region(HeapRegion* hr) {
3353   // Separated the asserts so that we know which one fires.
3354   assert(hr != NULL,
3355         "claim_region() should have filtered out continues humongous regions");
3356   assert(!hr->continuesHumongous(),
3357         "claim_region() should have filtered out continues humongous regions");
3358 
3359   if (_cm->verbose_low()) {
3360     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3361                            _worker_id, hr);
3362   }
3363 
3364   _curr_region  = hr;
3365   _finger       = hr->bottom();
3366   update_region_limit();
3367 }
3368 
3369 void CMTask::update_region_limit() {
3370   HeapRegion* hr            = _curr_region;
3371   HeapWord* bottom          = hr->bottom();
3372   HeapWord* limit           = hr->next_top_at_mark_start();
3373 
3374   if (limit == bottom) {
3375     if (_cm->verbose_low()) {
3376       gclog_or_tty->print_cr("[%u] found an empty region "
3377                              "["PTR_FORMAT", "PTR_FORMAT")",
3378                              _worker_id, bottom, limit);
3379     }
3380     // The region was collected underneath our feet.
3381     // We set the finger to bottom to ensure that the bitmap
3382     // iteration that will follow this will not do anything.
3383     // (this is not a condition that holds when we set the region up,
3384     // as the region is not supposed to be empty in the first place)
3385     _finger = bottom;
3386   } else if (limit >= _region_limit) {
3387     assert(limit >= _finger, "peace of mind");
3388   } else {
3389     assert(limit < _region_limit, "only way to get here");
3390     // This can happen under some pretty unusual circumstances. An
3391     // evacuation pause empties the region underneath our feet (NTAMS
3392     // at bottom). We then do some allocation in the region (NTAMS
3393     // stays at bottom), followed by the region being used as a GC
3394     // alloc region (NTAMS will move to top() and the objects
3395     // originally below it will be grayed). All objects now marked in
3396     // the region, if below the global finger, are explicitly grayed
3397     // and in fact we do not need to scan anything else. So, we simply
3398     // set _finger to limit to ensure that the bitmap iteration
3399     // doesn't do anything.
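         //
         // A sketch of that sequence:
         //   1. evacuation pause empties the region      (NTAMS == bottom)
         //   2. some allocation happens in the region    (NTAMS stays at bottom)
         //   3. region is retained as a GC alloc region  (NTAMS moves to top();
         //      objects below it are explicitly grayed)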
3400     _finger = limit;
3401   }
3402 
3403   _region_limit = limit;
3404 }
3405 
3406 void CMTask::giveup_current_region() {
3407   assert(_curr_region != NULL, "invariant");
3408   if (_cm->verbose_low()) {
3409     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3410                            _worker_id, _curr_region);
3411   }
3412   clear_region_fields();
3413 }
3414 
3415 void CMTask::clear_region_fields() {
3416   // Values for these three fields that indicate that we're not
3417   // holding on to a region.
3418   _curr_region   = NULL;
3419   _finger        = NULL;
3420   _region_limit  = NULL;
3421 }
3422 
3423 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3424   if (cm_oop_closure == NULL) {
3425     assert(_cm_oop_closure != NULL, "invariant");
3426   } else {
3427     assert(_cm_oop_closure == NULL, "invariant");
3428   }
3429   _cm_oop_closure = cm_oop_closure;
3430 }
3431 
3432 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3433   guarantee(nextMarkBitMap != NULL, "invariant");
3434 
3435   if (_cm->verbose_low()) {
3436     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3437   }
3438 
3439   _nextMarkBitMap                = nextMarkBitMap;
3440   clear_region_fields();
3441 
3442   _calls                         = 0;
3443   _elapsed_time_ms               = 0.0;
3444   _termination_time_ms           = 0.0;
3445   _termination_start_time_ms     = 0.0;
3446 
3447 #if _MARKING_STATS_
3448   _local_pushes                  = 0;
3449   _local_pops                    = 0;
3450   _local_max_size                = 0;
3451   _objs_scanned                  = 0;
3452   _global_pushes                 = 0;
3453   _global_pops                   = 0;
3454   _global_max_size               = 0;
3455   _global_transfers_to           = 0;
3456   _global_transfers_from         = 0;
3457   _regions_claimed               = 0;
3458   _objs_found_on_bitmap          = 0;
3459   _satb_buffers_processed        = 0;
3460   _steal_attempts                = 0;
3461   _steals                        = 0;
3462   _aborted                       = 0;
3463   _aborted_overflow              = 0;
3464   _aborted_cm_aborted            = 0;
3465   _aborted_yield                 = 0;
3466   _aborted_timed_out             = 0;
3467   _aborted_satb                  = 0;
3468   _aborted_termination           = 0;
3469 #endif // _MARKING_STATS_
3470 }
3471 
3472 bool CMTask::should_exit_termination() {
3473   regular_clock_call();
3474   // This is called when we are in the termination protocol. We should
3475   // quit if, for some reason, this task wants to abort or the global
3476   // stack is not empty (this means that we can get work from it).
3477   return !_cm->mark_stack_empty() || has_aborted();
3478 }
3479 
3480 void CMTask::reached_limit() {
3481   assert(_words_scanned >= _words_scanned_limit ||
3482          _refs_reached >= _refs_reached_limit,
3483          "shouldn't have been called otherwise");
3484   regular_clock_call();
3485 }
3486 
3487 void CMTask::regular_clock_call() {
3488   if (has_aborted()) return;
3489 
3490   // First, we need to recalculate the words scanned and refs reached
3491   // limits for the next clock call.
3492   recalculate_limits();
3493 
3494   // During the regular clock call we do the following:
3495 
3496   // (1) If an overflow has been flagged, then we abort.
3497   if (_cm->has_overflown()) {
3498     set_has_aborted();
3499     return;
3500   }
3501 
3502   // If we are not concurrent (i.e. we're doing remark) we don't need
3503   // to check anything else. The other steps are only needed during
3504   // the concurrent marking phase.
3505   if (!concurrent()) return;
3506 
3507   // (2) If marking has been aborted for Full GC, then we also abort.
3508   if (_cm->has_aborted()) {
3509     set_has_aborted();
3510     statsOnly( ++_aborted_cm_aborted );
3511     return;
3512   }
3513 
3514   double curr_time_ms = os::elapsedVTime() * 1000.0;
3515 
3516   // (3) If marking stats are enabled, then we update the clock statistics.
3517 #if _MARKING_STATS_
3518   if (_words_scanned >= _words_scanned_limit) {
3519     ++_clock_due_to_scanning;
3520   }
3521   if (_refs_reached >= _refs_reached_limit) {
3522     ++_clock_due_to_marking;
3523   }
3524 
3525   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3526   _interval_start_time_ms = curr_time_ms;
3527   _all_clock_intervals_ms.add(last_interval_ms);
3528 
3529   if (_cm->verbose_medium()) {
3530       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3531                         "scanned = %d%s, refs reached = %d%s",
3532                         _worker_id, last_interval_ms,
3533                         _words_scanned,
3534                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3535                         _refs_reached,
3536                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3537   }
3538 #endif // _MARKING_STATS_
3539 
3540   // (4) We check whether we should yield. If we have to, then we abort.
3541   if (_cm->should_yield()) {
3542     // We should yield. To do this we abort the task. The caller is
3543     // responsible for yielding.
3544     set_has_aborted();
3545     statsOnly( ++_aborted_yield );
3546     return;
3547   }
3548 
3549   // (5) We check whether we've reached our time quota. If we have,
3550   // then we abort.
3551   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3552   if (elapsed_time_ms > _time_target_ms) {
3553     set_has_aborted();
3554     _has_timed_out = true;
3555     statsOnly( ++_aborted_timed_out );
3556     return;
3557   }
3558 
3559   // (6) Finally, we check whether there are enough completed SATB
3560   // buffers available for processing. If there are, we abort.
3561   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3562   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3563     if (_cm->verbose_low()) {
3564       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3565                              _worker_id);
3566     }
3567     // we do need to process SATB buffers; we'll abort and restart
3568     // the marking task to do so
3569     set_has_aborted();
3570     statsOnly( ++_aborted_satb );
3571     return;
3572   }
3573 }
3574 
3575 void CMTask::recalculate_limits() {
3576   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3577   _words_scanned_limit      = _real_words_scanned_limit;
3578 
3579   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
3580   _refs_reached_limit       = _real_refs_reached_limit;
3581 }
3582 
3583 void CMTask::decrease_limits() {
3584   // This is called when we believe that we're going to do an infrequent
3585   // operation which will increase the per-byte scanned cost (i.e. move
3586   // entries to/from the global stack). It basically tries to decrease the
3587   // scanning limit so that the clock is called earlier.
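       //
       // For example (a sketch in terms of the constants used below): with
       // a scanning period of P words, the limit is pulled back from
       // _real_words_scanned_limit to _real_words_scanned_limit - 3P/4, so
       // the next clock call comes up to 3P/4 words earlier than it
       // otherwise would.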
3588 
3589   if (_cm->verbose_medium()) {
3590     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3591   }
3592 
3593   _words_scanned_limit = _real_words_scanned_limit -
3594     3 * words_scanned_period / 4;
3595   _refs_reached_limit  = _real_refs_reached_limit -
3596     3 * refs_reached_period / 4;
3597 }
3598 
3599 void CMTask::move_entries_to_global_stack() {
3600   // local array where we'll store the entries that will be popped
3601   // from the local queue
3602   oop buffer[global_stack_transfer_size];
3603 
3604   int n = 0;
3605   oop obj;
3606   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3607     buffer[n] = obj;
3608     ++n;
3609   }
3610 
3611   if (n > 0) {
3612     // we popped at least one entry from the local queue
3613 
3614     statsOnly( ++_global_transfers_to; _local_pops += n );
3615 
3616     if (!_cm->mark_stack_push(buffer, n)) {
3617       if (_cm->verbose_low()) {
3618         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3619                                _worker_id);
3620       }
3621       set_has_aborted();
3622     } else {
3623       // the transfer was successful
3624 
3625       if (_cm->verbose_medium()) {
3626         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3627                                _worker_id, n);
3628       }
3629       statsOnly( int tmp_size = _cm->mark_stack_size();
3630                  if (tmp_size > _global_max_size) {
3631                    _global_max_size = tmp_size;
3632                  }
3633                  _global_pushes += n );
3634     }
3635   }
3636 
3637   // this operation was quite expensive, so decrease the limits
3638   decrease_limits();
3639 }
3640 
3641 void CMTask::get_entries_from_global_stack() {
3642   // local array where we'll store the entries that will be popped
3643   // from the global stack.
3644   oop buffer[global_stack_transfer_size];
3645   int n;
3646   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3647   assert(n <= global_stack_transfer_size,
3648          "we should not pop more than the given limit");
3649   if (n > 0) {
3650     // yes, we did actually pop at least one entry
3651 
3652     statsOnly( ++_global_transfers_from; _global_pops += n );
3653     if (_cm->verbose_medium()) {
3654       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3655                              _worker_id, n);
3656     }
3657     for (int i = 0; i < n; ++i) {
3658       bool success = _task_queue->push(buffer[i]);
3659       // We only call this when the local queue is empty or under a
3660       // given target limit. So, we do not expect this push to fail.
3661       assert(success, "invariant");
3662     }
3663 
3664     statsOnly( int tmp_size = _task_queue->size();
3665                if (tmp_size > _local_max_size) {
3666                  _local_max_size = tmp_size;
3667                }
3668                _local_pushes += n );
3669   }
3670 
3671   // this operation was quite expensive, so decrease the limits
3672   decrease_limits();
3673 }
3674 
3675 void CMTask::drain_local_queue(bool partially) {
3676   if (has_aborted()) return;
3677 
3678   // Decide what the target size is, depending on whether we're going to
3679   // drain it partially (so that other tasks can steal if they run out
3680   // of things to do) or totally (at the very end).
3681   size_t target_size;
3682   if (partially) {
3683     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3684   } else {
3685     target_size = 0;
3686   }
3687 
3688   if (_task_queue->size() > target_size) {
3689     if (_cm->verbose_high()) {
3690       gclog_or_tty->print_cr("[%u] draining local queue, target size = "SIZE_FORMAT,
3691                              _worker_id, target_size);
3692     }
3693 
3694     oop obj;
3695     bool ret = _task_queue->pop_local(obj);
3696     while (ret) {
3697       statsOnly( ++_local_pops );
3698 
3699       if (_cm->verbose_high()) {
3700         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3701                                (void*) obj);
3702       }
3703 
3704       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3705       assert(!_g1h->is_on_master_free_list(
3706                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3707 
3708       scan_object(obj);
3709 
3710       if (_task_queue->size() <= target_size || has_aborted()) {
3711         ret = false;
3712       } else {
3713         ret = _task_queue->pop_local(obj);
3714       }
3715     }
3716 
3717     if (_cm->verbose_high()) {
3718       gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3719                              _worker_id, _task_queue->size());
3720     }
3721   }
3722 }
3723 
3724 void CMTask::drain_global_stack(bool partially) {
3725   if (has_aborted()) return;
3726 
3727   // We have a policy to drain the local queue before we attempt to
3728   // drain the global stack.
3729   assert(partially || _task_queue->size() == 0, "invariant");
3730 
3731   // Decide what the target size is, depending on whether we're going to
3732   // drain it partially (so that other tasks can steal if they run out
3733   // of things to do) or totally (at the very end).  Notice that,
3734   // because we move entries from the global stack in chunks or
3735   // because another task might be doing the same, we might in fact
3736   // drop below the target. But, this is not a problem.
3737   size_t target_size;
3738   if (partially) {
3739     target_size = _cm->partial_mark_stack_size_target();
3740   } else {
3741     target_size = 0;
3742   }
3743 
3744   if (_cm->mark_stack_size() > target_size) {
3745     if (_cm->verbose_low()) {
3746       gclog_or_tty->print_cr("[%u] draining global_stack, target size "SIZE_FORMAT,
3747                              _worker_id, target_size);
3748     }
3749 
3750     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3751       get_entries_from_global_stack();
3752       drain_local_queue(partially);
3753     }
3754 
3755     if (_cm->verbose_low()) {
3756       gclog_or_tty->print_cr("[%u] drained global stack, size = "SIZE_FORMAT,
3757                              _worker_id, _cm->mark_stack_size());
3758     }
3759   }
3760 }
3761 
3762 // The SATB queue code makes several assumptions about whether to call
3763 // the par or non-par versions of its methods. This is why some of the
3764 // code is replicated. We should really get rid of the single-threaded
3765 // version of the code to simplify things.
3766 void CMTask::drain_satb_buffers() {
3767   if (has_aborted()) return;
3768 
3769   // We set this so that the regular clock knows that we're in the
3770   // middle of draining buffers and doesn't set the abort flag when it
3771   // notices that SATB buffers are available for draining. It'd be
3772   // very counterproductive if it did that. :-)
3773   _draining_satb_buffers = true;
3774 
3775   CMObjectClosure oc(this);
3776   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3777   if (G1CollectedHeap::use_parallel_gc_threads()) {
3778     satb_mq_set.set_par_closure(_worker_id, &oc);
3779   } else {
3780     satb_mq_set.set_closure(&oc);
3781   }
3782 
3783   // This keeps claiming and applying the closure to completed buffers
3784   // until we run out of buffers or we need to abort.
3785   if (G1CollectedHeap::use_parallel_gc_threads()) {
3786     while (!has_aborted() &&
3787            satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
3788       if (_cm->verbose_medium()) {
3789         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3790       }
3791       statsOnly( ++_satb_buffers_processed );
3792       regular_clock_call();
3793     }
3794   } else {
3795     while (!has_aborted() &&
3796            satb_mq_set.apply_closure_to_completed_buffer()) {
3797       if (_cm->verbose_medium()) {
3798         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3799       }
3800       statsOnly( ++_satb_buffers_processed );
3801       regular_clock_call();
3802     }
3803   }
3804 
3805   if (!concurrent() && !has_aborted()) {
3806     // We should only do this during remark.
3807     if (G1CollectedHeap::use_parallel_gc_threads()) {
3808       satb_mq_set.par_iterate_closure_all_threads(_worker_id);
3809     } else {
3810       satb_mq_set.iterate_closure_all_threads();
3811     }
3812   }
3813 
3814   _draining_satb_buffers = false;
3815 
3816   assert(has_aborted() ||
3817          concurrent() ||
3818          satb_mq_set.completed_buffers_num() == 0, "invariant");
3819 
3820   if (G1CollectedHeap::use_parallel_gc_threads()) {
3821     satb_mq_set.set_par_closure(_worker_id, NULL);
3822   } else {
3823     satb_mq_set.set_closure(NULL);
3824   }
3825 
3826   // again, this was a potentially expensive operation; decrease the
3827   // limits to get the regular clock call early
3828   decrease_limits();
3829 }
3830 
3831 void CMTask::print_stats() {
3832   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3833                          _worker_id, _calls);
3834   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3835                          _elapsed_time_ms, _termination_time_ms);
3836   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3837                          _step_times_ms.num(), _step_times_ms.avg(),
3838                          _step_times_ms.sd());
3839   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3840                          _step_times_ms.maximum(), _step_times_ms.sum());
3841 
3842 #if _MARKING_STATS_
3843   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3844                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3845                          _all_clock_intervals_ms.sd());
3846   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
3847                          _all_clock_intervals_ms.maximum(),
3848                          _all_clock_intervals_ms.sum());
3849   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = %d, marking = %d",
3850                          _clock_due_to_scanning, _clock_due_to_marking);
3851   gclog_or_tty->print_cr("  Objects: scanned = %d, found on the bitmap = %d",
3852                          _objs_scanned, _objs_found_on_bitmap);
3853   gclog_or_tty->print_cr("  Local Queue:  pushes = %d, pops = %d, max size = %d",
3854                          _local_pushes, _local_pops, _local_max_size);
3855   gclog_or_tty->print_cr("  Global Stack: pushes = %d, pops = %d, max size = %d",
3856                          _global_pushes, _global_pops, _global_max_size);
3857   gclog_or_tty->print_cr("                transfers to = %d, transfers from = %d",
3858                          _global_transfers_to,_global_transfers_from);
3859   gclog_or_tty->print_cr("  Regions: claimed = %d", _regions_claimed);
3860   gclog_or_tty->print_cr("  SATB buffers: processed = %d", _satb_buffers_processed);
3861   gclog_or_tty->print_cr("  Steals: attempts = %d, successes = %d",
3862                          _steal_attempts, _steals);
3863   gclog_or_tty->print_cr("  Aborted: %d, due to", _aborted);
3864   gclog_or_tty->print_cr("    overflow: %d, global abort: %d, yield: %d",
3865                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3866   gclog_or_tty->print_cr("    time out: %d, SATB: %d, termination: %d",
3867                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3868 #endif // _MARKING_STATS_
3869 }
3870 
3871 /*****************************************************************************
3872 
3873     The do_marking_step(time_target_ms) method is the building block
3874     of the parallel marking framework. It can be called in parallel
3875     with other invocations of do_marking_step() on different tasks
3876     (but only one per task, obviously) and concurrently with the
3877     mutator threads, or during remark, hence it eliminates the need
3878     for two versions of the code. When called during remark, it will
3879     pick up from where the task left off during the concurrent marking
3880     phase. Interestingly, tasks are also claimable during evacuation
3881     pauses, since do_marking_step() ensures that it aborts before
3882     it needs to yield.
3883 
3884     The data structures that it uses to do marking work are the
3885     following:
3886 
3887       (1) Marking Bitmap. If there are gray objects that appear only
3888       on the bitmap (this happens either when dealing with an overflow
3889       or when the initial marking phase has simply marked the roots
3890       and didn't push them on the stack), then tasks claim heap
3891       regions whose bitmap they then scan to find gray objects. A
3892       global finger indicates where the end of the last claimed region
3893       is. A local finger indicates how far into the region a task has
3894       scanned. The two fingers are used to determine how to gray an
3895       object (i.e. whether simply marking it is OK, as it will be
3896       visited by a task in the future, or whether it needs to be also
3897       pushed on a stack).
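
           For instance (an illustrative sketch of one claimed region):

             bottom         local finger                    end
               |-- scanned -----|------- still to scan ------|

           An object at an address above the relevant finger will still
           be reached by the bitmap scan, so setting its bit is enough;
           one below the finger has already been passed over and must
           also be pushed on a stack.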
3898 
3899       (2) Local Queue. The local queue of the task, which is accessed
3900       reasonably efficiently by the task. Other tasks can steal from
3901       it when they run out of work. Throughout the marking phase, a
3902       task attempts to keep its local queue short but not totally
3903       empty, so that entries are available for stealing by other
3904       tasks. Only when there is no more work will a task totally
3905       drain its local queue.
3906 
3907       (3) Global Mark Stack. This handles local queue overflow. During
3908       marking only sets of entries are moved between it and the local
3909       queues, as access to it requires a mutex and finer-grained
3910       interaction with it might cause contention. If it
3911       overflows, then the marking phase should restart and iterate
3912       over the bitmap to identify gray objects. Throughout the marking
3913       phase, tasks attempt to keep the global mark stack at a small
3914       length but not totally empty, so that entries are available for
3915       popping by other tasks. Only when there is no more work will
3916       tasks totally drain the global mark stack.
3917 
3918       (4) SATB Buffer Queue. This is where completed SATB buffers are
3919       made available. Buffers are regularly removed from this queue
3920       and scanned for roots, so that the queue doesn't get too
3921       long. During remark, all completed buffers are processed, as
3922       well as the filled-in parts of any uncompleted buffers.
3923 
3924     The do_marking_step() method tries to abort when the time target
3925     has been reached. There are a few other cases when the
3926     do_marking_step() method also aborts:
3927 
3928       (1) When the marking phase has been aborted (after a Full GC).
3929 
3930       (2) When a global overflow (on the global stack) has been
3931       triggered. Before the task aborts, it will actually sync up with
3932       the other tasks to ensure that all the marking data structures
3933       (local queues, stacks, fingers etc.)  are re-initialised so that
3934       when do_marking_step() completes, the marking phase can
3935       immediately restart.
3936 
3937       (3) When enough completed SATB buffers are available. The
3938       do_marking_step() method only tries to drain SATB buffers right
3939       at the beginning. So, if enough buffers are available, the
3940       marking step aborts and the SATB buffers are processed at
3941       the beginning of the next invocation.
3942 
3943       (4) To yield. When we have to yield we abort and yield
3944       right at the end of do_marking_step(). This saves us from a lot
3945       of hassle as, by yielding, we might allow a Full GC. If this
3946       happens then objects will be compacted underneath our feet, the
3947       heap might shrink, etc. We save checking for this by just
3948       aborting and doing the yield right at the end.
3949 
3950     From the above it follows that the do_marking_step() method should
3951     be called in a loop (or, otherwise, regularly) until it completes.
3952 
3953     If a marking step completes without its has_aborted() flag being
3954     true, it means it has completed the current marking phase (and
3955     also all other marking tasks have done so and have all synced up).
3956 
3957     A method called regular_clock_call() is invoked "regularly" (in
3958     sub-ms intervals) throughout marking. It is this clock method that
3959     checks all the abort conditions which were mentioned above and
3960     decides when the task should abort. A work-based scheme is used to
3961     trigger this clock method: when the number of object words the
3962     marking phase has scanned or the number of references the marking
3963     phase has visited reach a given limit. Additional invocations of
3964     the clock method have been planted in a few other strategic places
3965     too. The initial reason for the clock method was to avoid calling
3966     vtime too regularly, as it is quite expensive. So, once it was in
3967     place, it was natural to piggy-back all the other conditions on it
3968     too and not constantly check them throughout the code.
3969 
3970  *****************************************************************************/
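
     // A minimal sketch (illustrative only; "task" and "target_ms" are
     // assumed names, and this is not the actual caller) of the driving
     // loop that the above implies:
     //
     //   do {
     //     task->do_marking_step(target_ms,
     //                           true /* do_stealing */,
     //                           true /* do_termination */);
     //     if (task->has_aborted()) {
     //       // yield, or handle overflow / Full GC abort, then retry
     //     }
     //   } while (task->has_aborted());
     //
     // When the loop exits with has_aborted() false, this task has
     // completed the current marking phase.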
3971 
3972 void CMTask::do_marking_step(double time_target_ms,
3973                              bool do_stealing,
3974                              bool do_termination) {
3975   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
3976   assert(concurrent() == _cm->concurrent(), "they should be the same");
3977 
3978   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
3979   assert(_task_queues != NULL, "invariant");
3980   assert(_task_queue != NULL, "invariant");
3981   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
3982 
3983   assert(!_claimed,
3984          "only one thread should claim this task at any one time");
3985 
3986   // OK, this doesn't safeguard against all possible scenarios, as it is
3987   // possible for two threads to set the _claimed flag at the same
3988   // time. But it is only for debugging purposes anyway and it will
3989   // catch most problems.
3990   _claimed = true;
3991 
3992   _start_time_ms = os::elapsedVTime() * 1000.0;
3993   statsOnly( _interval_start_time_ms = _start_time_ms );
3994 
3995   double diff_prediction_ms =
3996     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
3997   _time_target_ms = time_target_ms - diff_prediction_ms;
3998 
3999   // set up the variables that are used in the work-based scheme to
4000   // call the regular clock method
4001   _words_scanned = 0;
4002   _refs_reached  = 0;
4003   recalculate_limits();
4004 
4005   // clear all flags
4006   clear_has_aborted();
4007   _has_timed_out = false;
4008   _draining_satb_buffers = false;
4009 
4010   ++_calls;
4011 
4012   if (_cm->verbose_low()) {
4013     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4014                            "target = %1.2lfms >>>>>>>>>>",
4015                            _worker_id, _calls, _time_target_ms);
4016   }
4017 
4018   // Set up the bitmap and oop closures. Anything that uses them is
4019   // eventually called from this method, so it is OK to allocate these
4020   // on the stack.
4021   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4022   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
4023   set_cm_oop_closure(&cm_oop_closure);
4024 
4025   if (_cm->has_overflown()) {
4026     // This can happen if the mark stack overflows during a GC pause
4027     // and this task, after a yield point, restarts. We have to abort
4028     // as we need to get into the overflow protocol which happens
4029     // right at the end of this task.
4030     set_has_aborted();
4031   }
4032 
4033   // First drain any available SATB buffers. After this, we will not
4034   // look at SATB buffers before the next invocation of this method.
4035   // If enough completed SATB buffers are queued up, the regular clock
4036   // will abort this task so that it restarts.
4037   drain_satb_buffers();
4038   // ...then partially drain the local queue and the global stack
4039   drain_local_queue(true);
4040   drain_global_stack(true);
4041 
4042   do {
4043     if (!has_aborted() && _curr_region != NULL) {
4044       // This means that we're already holding on to a region.
4045       assert(_finger != NULL, "if region is not NULL, then the finger "
4046              "should not be NULL either");
4047 
4048       // We might have restarted this task after an evacuation pause
4049       // which might have evacuated the region we're holding on to
4050       // underneath our feet. Let's read its limit again to make sure
4051       // that we do not iterate over a region of the heap that
4052       // contains garbage (update_region_limit() will also move
4053       // _finger to the start of the region if it is found empty).
4054       update_region_limit();
4055       // We will start from _finger not from the start of the region,
4056       // as we might be restarting this task after aborting half-way
4057       // through scanning this region. In this case, _finger points to
4058       // the address where we last found a marked object. If this is a
4059       // fresh region, _finger points to start().
4060       MemRegion mr = MemRegion(_finger, _region_limit);
4061 
4062       if (_cm->verbose_low()) {
4063         gclog_or_tty->print_cr("[%u] we're scanning part "
4064                                "["PTR_FORMAT", "PTR_FORMAT") "
4065                                "of region "HR_FORMAT,
4066                                _worker_id, _finger, _region_limit,
4067                                HR_FORMAT_PARAMS(_curr_region));
4068       }
4069 
4070       assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
4071              "humongous regions should go around loop once only");
4072 
4073       // Some special cases:
4074       // If the memory region is empty, we can just give up the region.
4075       // If the current region is humongous then we only need to check
4076       // the bitmap for the bit associated with the start of the object,
4077       // scan the object if it's live, and give up the region.
4078       // Otherwise, let's iterate over the bitmap of the part of the region
4079       // that is left.
4080       // If the iteration is successful, give up the region.
4081       if (mr.is_empty()) {
4082         giveup_current_region();
4083         regular_clock_call();
4084       } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
4085         if (_nextMarkBitMap->isMarked(mr.start())) {
4086           // The object is marked - apply the closure
4087           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4088           bitmap_closure.do_bit(offset);
4089         }
4090         // Even if this task aborted while scanning the humongous object
4091         // we can (and should) give up the current region.
4092         giveup_current_region();
4093         regular_clock_call();
4094       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4095         giveup_current_region();
4096         regular_clock_call();
4097       } else {
4098         assert(has_aborted(), "currently the only way to do so");
4099         // The only way to abort the bitmap iteration is to return
4100         // false from the do_bit() method. However, inside the
4101         // do_bit() method we move the _finger to point to the
4102         // object currently being looked at. So, if we bail out, we
4103         // have definitely set _finger to something non-null.
4104         assert(_finger != NULL, "invariant");
4105 
4106         // Region iteration was actually aborted. So now _finger
4107         // points to the address of the object we last scanned. If we
4108         // leave it there, when we restart this task, we will rescan
4109         // the object. It is easy to avoid this. We move the finger by
4110         // enough to point to the next possible object header (the
4111         // bitmap knows by how much we need to move it as it knows its
4112         // granularity).
4113         assert(_finger < _region_limit, "invariant");
4114         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
4115         // Check if bitmap iteration was aborted while scanning the last object
4116         if (new_finger >= _region_limit) {
4117           giveup_current_region();
4118         } else {
4119           move_finger_to(new_finger);
4120         }
4121       }
4122     }
4123     // At this point we have either completed iterating over the
4124     // region we were holding on to, or we have aborted.
4125 
4126     // We then partially drain the local queue and the global stack.
4127     // (Do we really need this?)
4128     drain_local_queue(true);
4129     drain_global_stack(true);
4130 
4131     // Read the note on the claim_region() method on why it might
4132     // return NULL with potentially more regions available for
4133     // claiming and why we have to check out_of_regions() to determine
4134     // whether we're done or not.
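         // (In short: claim_region() can race with other workers and
         // return NULL even though more regions may still become
         // claimable, so out_of_regions() is the authoritative check for
         // whether the iteration is really finished.)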
4135     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4136       // We are going to try to claim a new region. We should have
4137       // given up on the previous one.
4138       // Separated the asserts so that we know which one fires.
4139       assert(_curr_region  == NULL, "invariant");
4140       assert(_finger       == NULL, "invariant");
4141       assert(_region_limit == NULL, "invariant");
4142       if (_cm->verbose_low()) {
4143         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4144       }
4145       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4146       if (claimed_region != NULL) {
4147         // Yes, we managed to claim one
4148         statsOnly( ++_regions_claimed );
4149 
4150         if (_cm->verbose_low()) {
4151           gclog_or_tty->print_cr("[%u] we successfully claimed "
4152                                  "region "PTR_FORMAT,
4153                                  _worker_id, claimed_region);
4154         }
4155 
4156         setup_for_region(claimed_region);
4157         assert(_curr_region == claimed_region, "invariant");
4158       }
4159     // It is important to call the regular clock here. It might take
4160     // a while to claim a region if, for example, we hit a large
4161     // block of empty regions. So we need to call the regular clock
4162     // method once per iteration of the loop to make sure it's called
4163     // frequently enough.
4164       regular_clock_call();
4165     }
4166 
4167     if (!has_aborted() && _curr_region == NULL) {
4168       assert(_cm->out_of_regions(),
4169              "at this point we should be out of regions");
4170     }
4171   } while (_curr_region != NULL && !has_aborted());
4172 
4173   if (!has_aborted()) {
4174     // We cannot check whether the global stack is empty, since other
4175     // tasks might be pushing objects to it concurrently.
4176     assert(_cm->out_of_regions(),
4177            "at this point we should be out of regions");
4178 
4179     if (_cm->verbose_low()) {
4180       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4181     }
4182 
4183     // Try to reduce the number of available SATB buffers so that
4184     // remark has less work to do.
4185     drain_satb_buffers();
4186   }
4187 
4188   // Since we've done everything else, we can now totally drain the
4189   // local queue and global stack.
4190   drain_local_queue(false);
4191   drain_global_stack(false);
4192 
4193   // Attempt to steal work from other tasks' queues.
4194   if (do_stealing && !has_aborted()) {
4195     // We have not aborted. This means that we have finished all that
4196     // we could. Let's try to do some stealing...
4197 
4198     // We cannot check whether the global stack is empty, since other
4199     // tasks might be pushing objects to it concurrently.
4200     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4201            "only way to reach here");
4202 
4203     if (_cm->verbose_low()) {
4204       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4205     }
4206 
4207     while (!has_aborted()) {
4208       oop obj;
4209       statsOnly( ++_steal_attempts );
4210 
4211       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4212         if (_cm->verbose_medium()) {
4213           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4214                                  _worker_id, (void*) obj);
4215         }
4216 
4217         statsOnly( ++_steals );
4218 
4219         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4220                "any stolen object should be marked");
4221         scan_object(obj);
4222 
4223         // And since we're towards the end, let's totally drain the
4224         // local queue and global stack.
4225         drain_local_queue(false);
4226         drain_global_stack(false);
4227       } else {
4228         break;
4229       }
4230     }
4231   }
4232 
4233   // If we are about to wrap up and go into termination, check if we
4234   // should raise the overflow flag.
4235   if (do_termination && !has_aborted()) {
4236     if (_cm->force_overflow()->should_force()) {
4237       _cm->set_has_overflown();
4238       regular_clock_call();
4239     }
4240   }
4241 
4242   // We still haven't aborted. Now, let's try to get into the
4243   // termination protocol.
4244   if (do_termination && !has_aborted()) {
4245     // We cannot check whether the global stack is empty, since other
4246     // tasks might be concurrently pushing objects on it.
4247     // Separated the asserts so that we know which one fires.
4248     assert(_cm->out_of_regions(), "only way to reach here");
4249     assert(_task_queue->size() == 0, "only way to reach here");
4250 
4251     if (_cm->verbose_low()) {
4252       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4253     }
4254 
4255     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4256     // The CMTask class also extends the TerminatorTerminator class,
4257     // hence its should_exit_termination() method will also decide
4258     // whether to exit the termination protocol.
4259     bool finished = _cm->terminator()->offer_termination(this);
4260     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4261     _termination_time_ms +=
4262       termination_end_time_ms - _termination_start_time_ms;
4263 
4264     if (finished) {
4265       // We're all done.
4266 
4267       if (_worker_id == 0) {
4268         // let's allow task 0 to do this
4269         if (concurrent()) {
4270           assert(_cm->concurrent_marking_in_progress(), "invariant");
4271           // we need to set this to false before the next
4272           // safepoint. This way we ensure that the marking phase
4273           // doesn't observe any more heap expansions.
4274           _cm->clear_concurrent_marking_in_progress();
4275         }
4276       }
4277 
4278       // We can now guarantee that the global stack is empty, since
4279       // all other tasks have finished. We separated the guarantees so
4280       // that, if a condition is false, we can immediately find out
4281       // which one.
4282       guarantee(_cm->out_of_regions(), "only way to reach here");
4283       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4284       guarantee(_task_queue->size() == 0, "only way to reach here");
4285       guarantee(!_cm->has_overflown(), "only way to reach here");
4286       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4287 
4288       if (_cm->verbose_low()) {
4289         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4290       }
4291     } else {
4292       // Apparently there's more work to do. Let's abort this task;
4293       // it will be restarted and we can hopefully find more things to do.
4294 
4295       if (_cm->verbose_low()) {
4296         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4297                                _worker_id);
4298       }
4299 
4300       set_has_aborted();
4301       statsOnly( ++_aborted_termination );
4302     }
4303   }
4304 
4305   // Mainly for debugging purposes: make sure that a pointer to the
4306   // closure, which was allocated on this frame's stack, doesn't
4307   // accidentally escape the frame.
4308   set_cm_oop_closure(NULL);
4309   double end_time_ms = os::elapsedVTime() * 1000.0;
4310   double elapsed_time_ms = end_time_ms - _start_time_ms;
4311   // Update the step history.
4312   _step_times_ms.add(elapsed_time_ms);
4313 
4314   if (has_aborted()) {
4315     // The task was aborted for some reason.
4316 
4317     statsOnly( ++_aborted );
4318 
4319     if (_has_timed_out) {
4320       double diff_ms = elapsed_time_ms - _time_target_ms;
4321       // Keep statistics of how well we did with respect to hitting
4322       // our target only if we actually timed out (if we aborted for
4323       // other reasons, then the results might get skewed).
4324       _marking_step_diffs_ms.add(diff_ms);
4325     }
4326 
4327     if (_cm->has_overflown()) {
4328       // This is the interesting one. We aborted because a global
4329       // overflow was raised. This means we have to restart the
4330       // marking phase and start iterating over regions. However, in
4331       // order to do this we have to make sure that all tasks stop
4332       // what they are doing and re-initialise in a safe manner. We
4333       // will achieve this with the use of two barrier sync points.
4334 
4335       if (_cm->verbose_low()) {
4336         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4337       }
4338 
4339       _cm->enter_first_sync_barrier(_worker_id);
4340       // When we exit this sync barrier we know that all tasks have
4341       // stopped doing marking work. So, it's now safe to
4342       // re-initialise our data structures. At the end of this method,
4343       // task 0 will clear the global data structures.
4344 
4345       statsOnly( ++_aborted_overflow );
4346 
4347       // We clear the local state of this task...
4348       clear_region_fields();
4349 
4350       // ...and enter the second barrier.
4351       _cm->enter_second_sync_barrier(_worker_id);
4352       // At this point everything has been re-initialised and we're
4353       // ready to restart.
4354     }
4355 
4356     if (_cm->verbose_low()) {
4357       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4358                              "elapsed = %1.2lfms <<<<<<<<<<",
4359                              _worker_id, _time_target_ms, elapsed_time_ms);
4360       if (_cm->has_aborted()) {
4361         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4362                                _worker_id);
4363       }
4364     }
4365   } else {
4366     if (_cm->verbose_low()) {
4367       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4368                              "elapsed = %1.2lfms <<<<<<<<<<",
4369                              _worker_id, _time_target_ms, elapsed_time_ms);
4370     }
4371   }
4372 
4373   _claimed = false;
4374 }
4375 
4376 CMTask::CMTask(uint worker_id,
4377                ConcurrentMark* cm,
4378                size_t* marked_bytes,
4379                BitMap* card_bm,
4380                CMTaskQueue* task_queue,
4381                CMTaskQueueSet* task_queues)
4382   : _g1h(G1CollectedHeap::heap()),
4383     _worker_id(worker_id), _cm(cm),
4384     _claimed(false),
4385     _nextMarkBitMap(NULL), _hash_seed(17),
4386     _task_queue(task_queue),
4387     _task_queues(task_queues),
4388     _cm_oop_closure(NULL),
4389     _marked_bytes_array(marked_bytes),
4390     _card_bm(card_bm) {
4391   guarantee(task_queue != NULL, "invariant");
4392   guarantee(task_queues != NULL, "invariant");
4393 
4394   statsOnly( _clock_due_to_scanning = 0;
4395              _clock_due_to_marking  = 0 );
4396 
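       // Presumably this seeds the step time diff statistics with a small,
       // conservative value so that early predictions are not drawn from
       // an empty sequence.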
4397   _marking_step_diffs_ms.add(0.5);
4398 }
4399 
4400 // These are formatting macros that are used below to ensure
4401 // consistent formatting. The *_H_* versions are used to format the
4402 // header for a particular value and they should be kept consistent
4403 // with the corresponding macro. Also note that most of the macros add
4404 // the necessary white space (as a prefix), which makes them a bit
4405 // easier to compose.
4406 
4407 // All the output lines are prefixed with this string to be able to
4408 // identify them easily in a large log file.
4409 #define G1PPRL_LINE_PREFIX            "###"
4410 
4411 #define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
4412 #ifdef _LP64
4413 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
4414 #else // _LP64
4415 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
4416 #endif // _LP64
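
     // (The 37- and 21-character header widths match the data rows:
     // PTR_FORMAT prints "0x" plus 16 hex digits (18 characters) on LP64
     // and "0x" plus 8 hex digits (10 characters) on 32-bit, so an address
     // range "from-to" occupies 2 * 18 + 1 = 37 or 2 * 10 + 1 = 21
     // characters respectively.)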
4417 
4418 // For per-region info
4419 #define G1PPRL_TYPE_FORMAT            "   %-4s"
4420 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
4421 #define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
4422 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
4423 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
4424 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
4425 
4426 // For summary info
4427 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
4428 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
4429 #define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
4430 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
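
     // As an illustrative composition (values hypothetical), the call
     //   _out->print_cr(G1PPRL_LINE_PREFIX
     //                  G1PPRL_SUM_MB_FORMAT("capacity"),
     //                  bytes_to_mb(_total_capacity_bytes));
     // concatenates into the single format string
     //   "###  capacity: %1.2f MB"
     // with the macros supplying the "###" prefix and the leading white space.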
4431 
4432 G1PrintRegionLivenessInfoClosure::
4433 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4434   : _out(out),
4435     _total_used_bytes(0), _total_capacity_bytes(0),
4436     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4437     _hum_used_bytes(0), _hum_capacity_bytes(0),
4438     _hum_prev_live_bytes(0), _hum_next_live_bytes(0) {
4439   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4440   MemRegion g1_committed = g1h->g1_committed();
4441   MemRegion g1_reserved = g1h->g1_reserved();
4442   double now = os::elapsedTime();
4443 
4444   // Print the header of the output.
4445   _out->cr();
4446   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4447   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4448                  G1PPRL_SUM_ADDR_FORMAT("committed")
4449                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4450                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4451                  g1_committed.start(), g1_committed.end(),
4452                  g1_reserved.start(), g1_reserved.end(),
4453                  HeapRegion::GrainBytes);
4454   _out->print_cr(G1PPRL_LINE_PREFIX);
4455   _out->print_cr(G1PPRL_LINE_PREFIX
4456                  G1PPRL_TYPE_H_FORMAT
4457                  G1PPRL_ADDR_BASE_H_FORMAT
4458                  G1PPRL_BYTE_H_FORMAT
4459                  G1PPRL_BYTE_H_FORMAT
4460                  G1PPRL_BYTE_H_FORMAT
4461                  G1PPRL_DOUBLE_H_FORMAT,
4462                  "type", "address-range",
4463                  "used", "prev-live", "next-live", "gc-eff");
4464   _out->print_cr(G1PPRL_LINE_PREFIX
4465                  G1PPRL_TYPE_H_FORMAT
4466                  G1PPRL_ADDR_BASE_H_FORMAT
4467                  G1PPRL_BYTE_H_FORMAT
4468                  G1PPRL_BYTE_H_FORMAT
4469                  G1PPRL_BYTE_H_FORMAT
4470                  G1PPRL_DOUBLE_H_FORMAT,
4471                  "", "",
4472                  "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
4473 }
4474 
4475 // Given a pointer to one of the _hum_* fields, this method deduces the
4476 // corresponding value for a region in a humongous region series
4477 // (either the region size, or what's left if the _hum_* field is
4478 // smaller than the region size) and updates the _hum_* field accordingly.
4479 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4480   size_t bytes = 0;
4481   // The > 0 check is to deal with the prev and next live bytes which
4482   // could be 0.
4483   if (*hum_bytes > 0) {
4484     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4485     *hum_bytes -= bytes;
4486   }
4487   return bytes;
4488 }
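
     // For example (sizes illustrative): with HeapRegion::GrainBytes at 1M
     // and *hum_bytes starting at 2560K for a humongous series spanning
     // 2.5 regions, three successive calls return 1024K, 1024K and 512K,
     // after which *hum_bytes is 0 and any further call returns 0.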
4489 
4490 // It deduces the values for a region in a humongous region series
4491 // from the _hum_* fields and updates those accordingly. It assumes
4492 // that the _hum_* fields have already been set up from the "starts
4493 // humongous" region and that we visit the regions in address order.
4494 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4495                                                      size_t* capacity_bytes,
4496                                                      size_t* prev_live_bytes,
4497                                                      size_t* next_live_bytes) {
4498   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4499   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
4500   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
4501   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4502   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4503 }
4504 
4505 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4506   const char* type = "";
4507   HeapWord* bottom       = r->bottom();
4508   HeapWord* end          = r->end();
4509   size_t capacity_bytes  = r->capacity();
4510   size_t used_bytes      = r->used();
4511   size_t prev_live_bytes = r->live_bytes();
4512   size_t next_live_bytes = r->next_live_bytes();
4513   double gc_eff          = r->gc_efficiency();
4514   if (r->used() == 0) {
4515     type = "FREE";
4516   } else if (r->is_survivor()) {
4517     type = "SURV";
4518   } else if (r->is_young()) {
4519     type = "EDEN";
4520   } else if (r->startsHumongous()) {
4521     type = "HUMS";
4522 
4523     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4524            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4525            "they should have been zeroed after the last time we used them");
4526     // Set up the _hum_* fields.
4527     _hum_capacity_bytes  = capacity_bytes;
4528     _hum_used_bytes      = used_bytes;
4529     _hum_prev_live_bytes = prev_live_bytes;
4530     _hum_next_live_bytes = next_live_bytes;
4531     get_hum_bytes(&used_bytes, &capacity_bytes,
4532                   &prev_live_bytes, &next_live_bytes);
4533     end = bottom + HeapRegion::GrainWords;
4534   } else if (r->continuesHumongous()) {
4535     type = "HUMC";
4536     get_hum_bytes(&used_bytes, &capacity_bytes,
4537                   &prev_live_bytes, &next_live_bytes);
4538     assert(end == bottom + HeapRegion::GrainWords, "invariant");
4539   } else {
4540     type = "OLD";
4541   }
4542 
4543   _total_used_bytes      += used_bytes;
4544   _total_capacity_bytes  += capacity_bytes;
4545   _total_prev_live_bytes += prev_live_bytes;
4546   _total_next_live_bytes += next_live_bytes;
4547 
4548   // Print a line for this particular region.
4549   _out->print_cr(G1PPRL_LINE_PREFIX
4550                  G1PPRL_TYPE_FORMAT
4551                  G1PPRL_ADDR_BASE_FORMAT
4552                  G1PPRL_BYTE_FORMAT
4553                  G1PPRL_BYTE_FORMAT
4554                  G1PPRL_BYTE_FORMAT
4555                  G1PPRL_DOUBLE_FORMAT,
4556                  type, bottom, end,
4557                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff);
4558 
4559   return false;
4560 }
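
     // Schematically, each per-region line printed above has the shape
     //   ### <type> <bottom>-<end>  <used>  <prev-live>  <next-live>  <gc-eff>
     // with column widths fixed by the G1PPRL_* macros so that the rows
     // line up under the two header lines printed by the constructor.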
4561 
4562 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4563   // Print the footer of the output.
4564   _out->print_cr(G1PPRL_LINE_PREFIX);
4565   _out->print_cr(G1PPRL_LINE_PREFIX
4566                  " SUMMARY"
4567                  G1PPRL_SUM_MB_FORMAT("capacity")
4568                  G1PPRL_SUM_MB_PERC_FORMAT("used")
4569                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4570                  G1PPRL_SUM_MB_PERC_FORMAT("next-live"),
4571                  bytes_to_mb(_total_capacity_bytes),
4572                  bytes_to_mb(_total_used_bytes),
4573                  perc(_total_used_bytes, _total_capacity_bytes),
4574                  bytes_to_mb(_total_prev_live_bytes),
4575                  perc(_total_prev_live_bytes, _total_capacity_bytes),
4576                  bytes_to_mb(_total_next_live_bytes),
4577                  perc(_total_next_live_bytes, _total_capacity_bytes));
4578   _out->cr();
4579 }