/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
                                               HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}
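
// A rough illustration of the word <-> bit mapping used above (assuming
// the usual definitions of heapWordToOffset()/offsetToHeapWord() in the
// header, i.e. offset == (addr - _bmStartWord) >> _shifter): with
// _shifter == 0 there is one bitmap bit per HeapWord, so
// addr == _bmStartWord + 17 maps to offset 17, and offsetToHeapWord(17)
// round-trips back to the same address.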

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                 HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
}
#endif

bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((uintptr_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}
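
// Rough sizing check for the math above (illustrative, not normative): the
// bitmap needs one bit per (HeapWord << _shifter), i.e. about
// _bmWordSize >> (_shifter + LogBitsPerByte) bytes of backing store. For
// example, a 1 GB heap on a 64-bit VM has 2^27 words; with _shifter == 0
// that is 2^27 bits, or 16 MB, per marking bitmap.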

void CMBitMap::clearAll() {
  _bm.clear();
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflown the marking stack.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up the existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity; we'll continue with the current capacity.
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

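// Lock-free push: claim a slot by advancing _index with a CAS, and only
// then write the entry into the claimed slot. A pusher that loses the
// race simply retries.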
void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically.  We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int  ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

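// Unlike the CAS-based operations above, the bulk transfers below
// serialize on ParGCRareEvent_lock; as the lock's name suggests, they are
// expected to be rare enough that contention is not a concern.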
void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint  new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

bool ConcurrentMark::not_yet_marked(oop obj) const {
  return _g1h->is_obj_ill(obj);
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}
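
// For illustration: with the integer arithmetic above,
// scale_parallel_threads() maps 1 or 2 parallel GC threads to 1 marking
// thread, 8 to 2, and in general n to (n + 2) / 4, but never less than 1.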

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  _g1h(g1h),
  _markBitMap1(MinObjAlignment - 1),
  _markBitMap2(MinObjAlignment - 1),

  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = "PTR_FORMAT, _heap_start, _heap_end);
  }

  if (!_markBitMap1.allocate(heap_rs)) {
    warning("Failed to allocate first CM bit map");
    return;
  }
  if (!_markBitMap2.allocate(heap_rs)) {
    warning("Failed to allocate second CM bit map");
    return;
  }

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads =       0;
    _max_parallel_marking_threads =   0;
    _sleep_factor             =     0.0;
    _marking_task_overhead    =     1.0;
  } else {
    if (ConcGCThreads > 0) {
      // Note that ConcGCThreads overrides G1MarkingOverheadPercent
      // if both are set.

      _parallel_marking_threads = (uint) ConcGCThreads;
      _max_parallel_marking_threads = _parallel_marking_threads;
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads
      // based on a target overhead with respect to the soft real-time
      // goal.

      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
                                                (double) os::processor_count();
      double sleep_factor =
                         (1.0 - marking_task_overhead) / marking_task_overhead;
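
      // Worked example (illustrative numbers only, assuming
      // G1MarkingOverheadPercent == 10, MaxGCPauseMillis == 200,
      // GCPauseIntervalMillis == 1000 and 8 processors):
      //   overall_cm_overhead   = 200 * 0.10 / 1000     = 0.02
      //   cpu_ratio             = 1 / 8                 = 0.125
      //   marking_thread_num    = ceil(0.02 / 0.125)    = 1
      //   marking_task_overhead = 0.02 / 1 * 8          = 0.16
      //   sleep_factor          = (1 - 0.16) / 0.16     = 5.25
      // i.e. one marking thread that sleeps about five times as long
      // as it runs.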

      _parallel_marking_threads = (uint) marking_thread_num;
      _max_parallel_marking_threads = _parallel_marking_threads;
      _sleep_factor             = sleep_factor;
      _marking_task_overhead    = marking_task_overhead;
    } else {
      _parallel_marking_threads = scale_parallel_threads((uint)ParallelGCThreads);
      _max_parallel_marking_threads = _parallel_marking_threads;
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    }

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
                     (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) heap_rs.base();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.
    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions
  // at the end of evacuation pauses, when tasks are inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end   = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different numbers of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}

void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(_finger == _heap_end, "only way to get here");
    update_g1_committed(true);
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // Clear the mark bitmap (no grey objects to start with). We need
  // to do this in chunks and offer to yield in between each chunk.
  HeapWord* start  = _nextMarkBitMap->startWord();
  HeapWord* end    = _nextMarkBitMap->endWord();
  HeapWord* cur    = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur,next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Clear the liveness counting data
  clear_all_count_data();

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialise marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}

void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining number of forced overflows decreases
  // at every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all threads to
  // have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _first_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
  }

  // let the task associated with worker 0 do this
  if (worker_id == 0) {
    // task 0 is responsible for clearing the global data structures
    // We should be here because of an overflow. During STW we should
    // not clear the overflow flag since we rely on it being true when
    // we exit this method to abort the pause and restart concurrent
    // marking.
    reset_marking_state(concurrent() /* clear_overflow */);
    force_overflow()->update();

    if (G1Log::fine()) {
      gclog_or_tty->date_stamp(PrintGCDateStamps);
      gclog_or_tty->stamp(PrintGCTimeStamps);
      gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _second_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everything should be re-initialised and ready to go

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
  }
}
#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
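
// Net effect (a debug-build diagnostic driven by the G1ConcMarkForceOverflow
// flag): should_force() answers true at most once per update() cycle, so an
// overflow is forced on roughly the first G1ConcMarkForceOverflow remark
// attempts and never after that.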
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    ConcurrentGCThread::stsJoin();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true /* do_stealing    */,
                                  true /* do_termination */);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          ConcurrentGCThread::stsLeave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          ConcurrentGCThread::stsJoin();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
          gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                                 "overhead %1.4lf",
                                 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                                 the_task->conc_overhead(os::elapsedTime()) * 8.0);
          gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                                 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    ConcurrentGCThread::stsLeave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
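  // Walk the region object by object: oop_iterate() applies the closure
  // to the object's fields and returns the object's size in words, which
  // is how far we advance to the next object.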
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (parallel_marking_threads() > 0) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_phase()"
  set_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (parallel_marking_threads() > 0) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_strong_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    gclog_or_tty->print(" VerifyDuringGC:(before)");
    Universe::heap()->prepare_for_verify();
    Universe::verify(/* silent */ false,
                     /* option */ VerifyOption_G1UsePrevMarking);
  }

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops.  We overflowed.  Restart concurrent marking.
    _restart_for_overflow = true;
    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all threads to
    // have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      gclog_or_tty->print(" VerifyDuringGC:(after)");
      Universe::heap()->prepare_for_verify();
      Universe::verify(/* silent */ false,
                       /* option */ VerifyOption_G1UseNextMarking);
    }
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

#if VERIFY_OBJS_PROCESSED
  _scan_obj_cl.objs_processed = 0;
  ThreadLocalObjQueue::objs_enqueued = 0;
#endif

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->continuesHumongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
    if (!hr->startsHumongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   start, ntams, hr->end()));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in the heap, obj_end
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
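
      // Illustration (assuming the usual 512-byte cards and a
      // card-granular card_bitmap_index_for()): an object spanning
      // [addr, addr + 0x300), where addr is card aligned, covers parts
      // of two cards; end_idx computed from addr + 0x300 points at the
      // second card, and since addr + 0x300 is not card aligned end_idx
      // is bumped by one so the half-covered card is included below.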
1421 
1422       // Set the bits in the card BM for the cards spanned by this object.
1423       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1424 
1425       // Add the size of this object to the number of marked bytes.
1426       marked_bytes += (size_t)obj_sz * HeapWordSize;
1427 
1428       // Find the next marked object after this one.
1429       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1430     }
1431 
1432     // Mark the allocated-since-marking portion...
1433     HeapWord* top = hr->top();
1434     if (ntams < top) {
1435       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1436       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1437 
1438       // Note: if we're looking at the last region in the heap, top
1439       // could actually be just beyond the end of the heap; end_idx
1440       // will then correspond to a (non-existent) card that is also
1441       // just beyond the heap.
1442       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1443         // top is not card aligned - increment to cover all the
1444         // cards spanned by the [ntams, top) range
1445         end_idx += 1;
1446       }
1447       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1448 
1449       // This definitely means the region has live objects.
1450       set_bit_for_region(hr);
1451     }
1452 
1453     // Update the live region bitmap.
1454     if (marked_bytes > 0) {
1455       set_bit_for_region(hr);
1456     }
1457 
1458     // Set the marked bytes for the current region so that
1459     // it can be queried by a calling verification routine.
1460     _region_marked_bytes = marked_bytes;
1461 
1462     return false;
1463   }
1464 
1465   size_t region_marked_bytes() const { return _region_marked_bytes; }
1466 };
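
// A minimal, self-contained sketch (not VM code) of the card-interval
// arithmetic used by CalcLiveObjectsClosure above. The helper name and the
// card_size_in_words parameter are hypothetical; the real indices come from
// ConcurrentMark::card_bitmap_index_for(). The key point is that the end
// index is exclusive, so it is bumped by one only when the end address is
// not card aligned, ensuring every card the object (or the [ntams, top)
// range) touches is covered.
static void sketch_card_index_range(size_t start_word, size_t end_word,
                                    size_t card_size_in_words,
                                    size_t* start_idx, size_t* end_idx) {
  *start_idx = start_word / card_size_in_words; // card holding the first word
  *end_idx   = end_word   / card_size_in_words; // card holding the end address
  if (end_word % card_size_in_words != 0) {
    *end_idx += 1;                              // end is mid-card: round up
  }
}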
1467 
1468 // Heap region closure used for verifying the counting data
1469 // that was accumulated concurrently and aggregated during
1470 // the remark pause. This closure is applied to the heap
1471 // regions during the STW cleanup pause.
1472 
1473 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1474   G1CollectedHeap* _g1h;
1475   ConcurrentMark* _cm;
1476   CalcLiveObjectsClosure _calc_cl;
1477   BitMap* _region_bm;   // Region BM to be verified
1478   BitMap* _card_bm;     // Card BM to be verified
1479   bool _verbose;        // verbose output?
1480 
1481   BitMap* _exp_region_bm; // Expected Region BM values
1482   BitMap* _exp_card_bm;   // Expected card BM values
1483 
1484   int _failures;
1485 
1486 public:
1487   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1488                                 BitMap* region_bm,
1489                                 BitMap* card_bm,
1490                                 BitMap* exp_region_bm,
1491                                 BitMap* exp_card_bm,
1492                                 bool verbose) :
1493     _g1h(g1h), _cm(g1h->concurrent_mark()),
1494     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1495     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1496     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1497     _failures(0) { }
1498 
1499   int failures() const { return _failures; }
1500 
1501   bool doHeapRegion(HeapRegion* hr) {
1502     if (hr->continuesHumongous()) {
1503       // We will ignore these here and process them when their
1504       // associated "starts humongous" region is processed (see
1505       // set_bit_for_region()). Note that we cannot rely on their
1506       // associated "starts humongous" region to have its bit set to
1507       // 1 since, due to the region chunking in the parallel region
1508       // iteration, a "continues humongous" region might be visited
1509       // before its associated "starts humongous".
1510       return false;
1511     }
1512 
1513     int failures = 0;
1514 
1515     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1516     // this region and set the corresponding bits in the expected region
1517     // and card bitmaps.
1518     bool res = _calc_cl.doHeapRegion(hr);
1519     assert(res == false, "should be continuing");
1520 
1521     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1522                     Mutex::_no_safepoint_check_flag);
1523 
1524     // Verify the marked bytes for this region.
1525     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1526     size_t act_marked_bytes = hr->next_marked_bytes();
1527 
1528     // We're not OK if expected marked bytes > actual marked bytes. It means
1529     // we have missed accounting for some objects during the actual marking.
1530     if (exp_marked_bytes > act_marked_bytes) {
1531       if (_verbose) {
1532         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1533                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1534                                hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
1535       }
1536       failures += 1;
1537     }
1538 
1539     // Verify the bit, for this region, in the actual and expected
1540     // (which was just calculated) region bit maps.
1541     // We're not OK if the bit in the calculated expected region
1542     // bitmap is set and the bit in the actual region bitmap is not.
1543     BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1544 
1545     bool expected = _exp_region_bm->at(index);
1546     bool actual = _region_bm->at(index);
1547     if (expected && !actual) {
1548       if (_verbose) {
1549         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1550                                "expected: %s, actual: %s",
1551                                hr->hrs_index(),
1552                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1553       }
1554       failures += 1;
1555     }
1556 
1557     // Verify that the card bit maps for the cards spanned by the current
1558     // region match. We have an error if we have a set bit in the expected
1559     // bit map and the corresponding bit in the actual bitmap is not set.
1560 
1561     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1562     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1563 
1564     for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
1565       expected = _exp_card_bm->at(i);
1566       actual = _card_bm->at(i);
1567 
1568       if (expected && !actual) {
1569         if (_verbose) {
1570           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1571                                  "expected: %s, actual: %s",
1572                                  hr->hrs_index(), i,
1573                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1574         }
1575         failures += 1;
1576       }
1577     }
1578 
1579     if (failures > 0 && _verbose)  {
1580       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1581                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1582                              HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
1583                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1584     }
1585 
1586     _failures += failures;
1587 
1588     // We could stop iteration over the heap when we
1589     // find the first violating region by returning true.
1590     return false;
1591   }
1592 };
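
// A minimal sketch (not VM code) of the one-sided check performed by
// VerifyLiveObjectDataHRClosure above: a bit set in the expected bitmap
// must also be set in the actual bitmap, while extra actual bits are
// tolerated (the actual data may conservatively include more). The helper
// name is hypothetical.
static int sketch_count_missing_bits(BitMap* expected, BitMap* actual,
                                     BitMap::idx_t from, BitMap::idx_t to) {
  int failures = 0;
  for (BitMap::idx_t i = from; i < to; i += 1) {
    if (expected->at(i) && !actual->at(i)) {
      failures += 1; // expected live, but missing from the actual bitmap
    }
  }
  return failures;
}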
1593 
1594 
1595 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1596 protected:
1597   G1CollectedHeap* _g1h;
1598   ConcurrentMark* _cm;
1599   BitMap* _actual_region_bm;
1600   BitMap* _actual_card_bm;
1601 
1602   uint    _n_workers;
1603 
1604   BitMap* _expected_region_bm;
1605   BitMap* _expected_card_bm;
1606 
1607   int  _failures;
1608   bool _verbose;
1609 
1610 public:
1611   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1612                             BitMap* region_bm, BitMap* card_bm,
1613                             BitMap* expected_region_bm, BitMap* expected_card_bm)
1614     : AbstractGangTask("G1 verify final counting"),
1615       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1616       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1617       _n_workers(0),
1618       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1619       _failures(0), _verbose(false) {
1620     assert(VerifyDuringGC, "don't call this otherwise");
1621 
1622     // Use the value already set as the number of active threads
1623     // in the call to run_task().
1624     if (G1CollectedHeap::use_parallel_gc_threads()) {
1625       assert(_g1h->workers()->active_workers() > 0,
1626              "Should have been previously set");
1627       _n_workers = _g1h->workers()->active_workers();
1628     } else {
1629       _n_workers = 1;
1630     }
1631 
1632     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1633     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1634 
1635     _verbose = _cm->verbose_medium();
1636   }
1637 
1638   void work(uint worker_id) {
1639     assert(worker_id < _n_workers, "invariant");
1640 
1641     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1642                                             _actual_region_bm, _actual_card_bm,
1643                                             _expected_region_bm,
1644                                             _expected_card_bm,
1645                                             _verbose);
1646 
1647     if (G1CollectedHeap::use_parallel_gc_threads()) {
1648       _g1h->heap_region_par_iterate_chunked(&verify_cl,
1649                                             worker_id,
1650                                             _n_workers,
1651                                             HeapRegion::VerifyCountClaimValue);
1652     } else {
1653       _g1h->heap_region_iterate(&verify_cl);
1654     }
1655 
1656     Atomic::add(verify_cl.failures(), &_failures);
1657   }
1658 
1659   int failures() const { return _failures; }
1660 };
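
// A sketch (hypothetical helper, not VM code) of the failure accumulation
// in G1ParVerifyFinalCountTask::work() above: each worker folds its private
// count into the shared total with Atomic::add, so no lock is needed on
// this path.
static void sketch_accumulate_failures(int worker_failures, int* total) {
  Atomic::add(worker_failures, total); // atomic read-modify-write of *total
}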
1661 
1662 // Closure that finalizes the liveness counting data.
1663 // Used during the cleanup pause.
1664 // Sets the bits corresponding to the interval [NTAMS, top)
1665 // (which contains the implicitly live objects) in the
1666 // card liveness bitmap. Also sets the bit, in the region
1667 // liveness bitmap, for each region containing live data.
1668 
1669 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1670  public:
1671   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1672                               BitMap* region_bm,
1673                               BitMap* card_bm) :
1674     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1675 
1676   bool doHeapRegion(HeapRegion* hr) {
1677 
1678     if (hr->continuesHumongous()) {
1679       // We will ignore these here and process them when their
1680       // associated "starts humongous" region is processed (see
1681       // set_bit_for_region()). Note that we cannot rely on their
1682       // associated "starts humongous" region to have its bit set to
1683       // 1 since, due to the region chunking in the parallel region
1684       // iteration, a "continues humongous" region might be visited
1685       // before its associated "starts humongous".
1686       return false;
1687     }
1688 
1689     HeapWord* ntams = hr->next_top_at_mark_start();
1690     HeapWord* top   = hr->top();
1691 
1692     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1693 
1694     // Mark the allocated-since-marking portion...
1695     if (ntams < top) {
1696       // This definitely means the region has live objects.
1697       set_bit_for_region(hr);
1698 
1699       // Now set the bits in the card bitmap for [ntams, top)
1700       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1701       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1702 
1703       // Note: if we're looking at the last region in the heap, top
1704       // could actually be just beyond the end of the heap; end_idx
1705       // will then correspond to a (non-existent) card that is also
1706       // just beyond the heap.
1707       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1708         // top is not card aligned - increment to cover all the
1709         // cards spanned by the [ntams, top) range
1710         end_idx += 1;
1711       }
1712 
1713       assert(end_idx <= _card_bm->size(),
1714              err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1715                      end_idx, _card_bm->size()));
1716       assert(start_idx < _card_bm->size(),
1717              err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1718                      start_idx, _card_bm->size()));
1719 
1720       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1721     }
1722 
1723     // Set the bit for the region if it contains live data
1724     if (hr->next_marked_bytes() > 0) {
1725       set_bit_for_region(hr);
1726     }
1727 
1728     return false;
1729   }
1730 };
1731 
1732 class G1ParFinalCountTask: public AbstractGangTask {
1733 protected:
1734   G1CollectedHeap* _g1h;
1735   ConcurrentMark* _cm;
1736   BitMap* _actual_region_bm;
1737   BitMap* _actual_card_bm;
1738 
1739   uint    _n_workers;
1740 
1741 public:
1742   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1743     : AbstractGangTask("G1 final counting"),
1744       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1745       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1746       _n_workers(0) {
1747     // Use the value already set as the number of active threads
1748     // in the call to run_task().
1749     if (G1CollectedHeap::use_parallel_gc_threads()) {
1750       assert(_g1h->workers()->active_workers() > 0,
1751              "Should have been previously set");
1752       _n_workers = _g1h->workers()->active_workers();
1753     } else {
1754       _n_workers = 1;
1755     }
1756   }
1757 
1758   void work(uint worker_id) {
1759     assert(worker_id < _n_workers, "invariant");
1760 
1761     FinalCountDataUpdateClosure final_update_cl(_g1h,
1762                                                 _actual_region_bm,
1763                                                 _actual_card_bm);
1764 
1765     if (G1CollectedHeap::use_parallel_gc_threads()) {
1766       _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1767                                             worker_id,
1768                                             _n_workers,
1769                                             HeapRegion::FinalCountClaimValue);
1770     } else {
1771       _g1h->heap_region_iterate(&final_update_cl);
1772     }
1773   }
1774 };
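
// A sketch (hypothetical helper name, not VM code) of the iteration idiom
// shared by G1ParFinalCountTask and G1ParVerifyFinalCountTask above: in
// parallel mode the chunked iterator plus a per-phase claim value ensures
// each region is visited exactly once across all workers; in serial mode
// worker 0 simply walks the whole heap.
static void sketch_iterate_regions(G1CollectedHeap* g1h, HeapRegionClosure* cl,
                                   uint worker_id, uint n_workers,
                                   jint claim_value) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    g1h->heap_region_par_iterate_chunked(cl, worker_id, n_workers, claim_value);
  } else {
    g1h->heap_region_iterate(cl);
  }
}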
1775 
1776 class G1ParNoteEndTask;
1777 
1778 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1779   G1CollectedHeap* _g1;
1780   int _worker_num;
1781   size_t _max_live_bytes;
1782   uint _regions_claimed;
1783   size_t _freed_bytes;
1784   FreeRegionList* _local_cleanup_list;
1785   OldRegionSet* _old_proxy_set;
1786   HumongousRegionSet* _humongous_proxy_set;
1787   HRRSCleanupTask* _hrrs_cleanup_task;
1788   double _claimed_region_time;
1789   double _max_region_time;
1790 
1791 public:
1792   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1793                              int worker_num,
1794                              FreeRegionList* local_cleanup_list,
1795                              OldRegionSet* old_proxy_set,
1796                              HumongousRegionSet* humongous_proxy_set,
1797                              HRRSCleanupTask* hrrs_cleanup_task) :
1798     _g1(g1), _worker_num(worker_num),
1799     _max_live_bytes(0), _regions_claimed(0),
1800     _freed_bytes(0),
1801     _local_cleanup_list(local_cleanup_list),
1802     _old_proxy_set(old_proxy_set),
1803     _humongous_proxy_set(humongous_proxy_set),
1804     _hrrs_cleanup_task(hrrs_cleanup_task),
1805     _claimed_region_time(0.0), _max_region_time(0.0) { }
1806 
1807   size_t freed_bytes() { return _freed_bytes; }
1808 
1809   bool doHeapRegion(HeapRegion *hr) {
1810     if (hr->continuesHumongous()) {
1811       return false;
1812     }
1813     // We use a claim value of zero here because all regions
1814     // were claimed with value 1 in the FinalCount task.
1815     _g1->reset_gc_time_stamps(hr);
1816     double start = os::elapsedTime();
1817     _regions_claimed++;
1818     hr->note_end_of_marking();
1819     _max_live_bytes += hr->max_live_bytes();
1820     _g1->free_region_if_empty(hr,
1821                               &_freed_bytes,
1822                               _local_cleanup_list,
1823                               _old_proxy_set,
1824                               _humongous_proxy_set,
1825                               _hrrs_cleanup_task,
1826                               true /* par */);
1827     double region_time = (os::elapsedTime() - start);
1828     _claimed_region_time += region_time;
1829     if (region_time > _max_region_time) {
1830       _max_region_time = region_time;
1831     }
1832     return false;
1833   }
1834 
1835   size_t max_live_bytes() { return _max_live_bytes; }
1836   uint regions_claimed() { return _regions_claimed; }
1837   double claimed_region_time_sec() { return _claimed_region_time; }
1838   double max_region_time_sec() { return _max_region_time; }
1839 };
1840 
1841 class G1ParNoteEndTask: public AbstractGangTask {
1842   friend class G1NoteEndOfConcMarkClosure;
1843 
1844 protected:
1845   G1CollectedHeap* _g1h;
1846   size_t _max_live_bytes;
1847   size_t _freed_bytes;
1848   FreeRegionList* _cleanup_list;
1849 
1850 public:
1851   G1ParNoteEndTask(G1CollectedHeap* g1h,
1852                    FreeRegionList* cleanup_list) :
1853     AbstractGangTask("G1 note end"), _g1h(g1h),
1854     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1855 
1856   void work(uint worker_id) {
1857     double start = os::elapsedTime();
1858     FreeRegionList local_cleanup_list("Local Cleanup List");
1859     OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
1860     HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
1861     HRRSCleanupTask hrrs_cleanup_task;
1862     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
1863                                            &old_proxy_set,
1864                                            &humongous_proxy_set,
1865                                            &hrrs_cleanup_task);
1866     if (G1CollectedHeap::use_parallel_gc_threads()) {
1867       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1868                                             _g1h->workers()->active_workers(),
1869                                             HeapRegion::NoteEndClaimValue);
1870     } else {
1871       _g1h->heap_region_iterate(&g1_note_end);
1872     }
1873     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1874 
1875     // Now update the lists
1876     _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
1877                                             NULL /* free_list */,
1878                                             &old_proxy_set,
1879                                             &humongous_proxy_set,
1880                                             true /* par */);
1881     {
1882       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1883       _max_live_bytes += g1_note_end.max_live_bytes();
1884       _freed_bytes += g1_note_end.freed_bytes();
1885 
1886       // If we iterate over the global cleanup list at the end of
1887       // cleanup to do this printing we will not guarantee to only
1888       // generate output for the newly-reclaimed regions (the list
1889       // might not be empty at the beginning of cleanup; we might
1890       // still be working on its previous contents). So we do the
1891       // printing here, before we append the new regions to the global
1892       // cleanup list.
1893 
1894       G1HRPrinter* hr_printer = _g1h->hr_printer();
1895       if (hr_printer->is_active()) {
1896         HeapRegionLinkedListIterator iter(&local_cleanup_list);
1897         while (iter.more_available()) {
1898           HeapRegion* hr = iter.get_next();
1899           hr_printer->cleanup(hr);
1900         }
1901       }
1902 
1903       _cleanup_list->add_as_tail(&local_cleanup_list);
1904       assert(local_cleanup_list.is_empty(), "post-condition");
1905 
1906       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1907     }
1908   }
1909   size_t max_live_bytes() { return _max_live_bytes; }
1910   size_t freed_bytes() { return _freed_bytes; }
1911 };
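
// A sketch (hypothetical helper, not VM code) of the private-then-merge
// pattern used by G1ParNoteEndTask::work() above: workers accumulate into
// thread-local lists and counters, and only take the rare-event lock to
// fold their results into the shared totals.
static void sketch_merge_worker_totals(size_t worker_live, size_t worker_freed,
                                       size_t* shared_live, size_t* shared_freed) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  *shared_live  += worker_live;
  *shared_freed += worker_freed;
}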
1912 
1913 class G1ParScrubRemSetTask: public AbstractGangTask {
1914 protected:
1915   G1RemSet* _g1rs;
1916   BitMap* _region_bm;
1917   BitMap* _card_bm;
1918 public:
1919   G1ParScrubRemSetTask(G1CollectedHeap* g1h,
1920                        BitMap* region_bm, BitMap* card_bm) :
1921     AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
1922     _region_bm(region_bm), _card_bm(card_bm) { }
1923 
1924   void work(uint worker_id) {
1925     if (G1CollectedHeap::use_parallel_gc_threads()) {
1926       _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
1927                        HeapRegion::ScrubRemSetClaimValue);
1928     } else {
1929       _g1rs->scrub(_region_bm, _card_bm);
1930     }
1931   }
1932 
1933 };
1934 
1935 void ConcurrentMark::cleanup() {
1936   // world is stopped at this checkpoint
1937   assert(SafepointSynchronize::is_at_safepoint(),
1938          "world should be stopped");
1939   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1940 
1941   // If a full collection has happened, we shouldn't do this.
1942   if (has_aborted()) {
1943     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1944     return;
1945   }
1946 
1947   HRSPhaseSetter x(HRSPhaseCleanup);
1948   g1h->verify_region_sets_optional();
1949 
1950   if (VerifyDuringGC) {
1951     HandleMark hm;  // handle scope
1952     gclog_or_tty->print(" VerifyDuringGC:(before)");
1953     Universe::heap()->prepare_for_verify();
1954     Universe::verify(/* silent */ false,
1955                      /* option */ VerifyOption_G1UsePrevMarking);
1956   }
1957 
1958   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1959   g1p->record_concurrent_mark_cleanup_start();
1960 
1961   double start = os::elapsedTime();
1962 
1963   HeapRegionRemSet::reset_for_cleanup_tasks();
1964 
1965   uint n_workers;
1966 
1967   // Do counting once more with the world stopped for good measure.
1968   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1969 
1970   if (G1CollectedHeap::use_parallel_gc_threads()) {
1971     assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
1972            "sanity check");
1973 
1974     g1h->set_par_threads();
1975     n_workers = g1h->n_par_threads();
1976     assert(g1h->n_par_threads() == n_workers,
1977            "Should not have been reset");
1978     g1h->workers()->run_task(&g1_par_count_task);
1979     // Done with the parallel phase so reset to 0.
1980     g1h->set_par_threads(0);
1981 
1982     assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
1983            "sanity check");
1984   } else {
1985     n_workers = 1;
1986     g1_par_count_task.work(0);
1987   }
1988 
1989   if (VerifyDuringGC) {
1990     // Verify that the counting data accumulated during marking matches
1991     // that calculated by walking the marking bitmap.
1992 
1993     // Bitmaps to hold expected values
1994     BitMap expected_region_bm(_region_bm.size(), false);
1995     BitMap expected_card_bm(_card_bm.size(), false);
1996 
1997     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1998                                                  &_region_bm,
1999                                                  &_card_bm,
2000                                                  &expected_region_bm,
2001                                                  &expected_card_bm);
2002 
2003     if (G1CollectedHeap::use_parallel_gc_threads()) {
2004       g1h->set_par_threads((int)n_workers);
2005       g1h->workers()->run_task(&g1_par_verify_task);
2006       // Done with the parallel phase so reset to 0.
2007       g1h->set_par_threads(0);
2008 
2009       assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2010              "sanity check");
2011     } else {
2012       g1_par_verify_task.work(0);
2013     }
2014 
2015     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2016   }
2017 
2018   size_t start_used_bytes = g1h->used();
2019   g1h->set_marking_complete();
2020 
2021   double count_end = os::elapsedTime();
2022   double this_final_counting_time = (count_end - start);
2023   _total_counting_time += this_final_counting_time;
2024 
2025   if (G1PrintRegionLivenessInfo) {
2026     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2027     _g1h->heap_region_iterate(&cl);
2028   }
2029 
2030   // Install the newly created mark bitmap as "prev".
2031   swapMarkBitMaps();
2032 
2033   g1h->reset_gc_time_stamp();
2034 
2035   // Note end of marking in all heap regions.
2036   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2037   if (G1CollectedHeap::use_parallel_gc_threads()) {
2038     g1h->set_par_threads((int)n_workers);
2039     g1h->workers()->run_task(&g1_par_note_end_task);
2040     g1h->set_par_threads(0);
2041 
2042     assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2043            "sanity check");
2044   } else {
2045     g1_par_note_end_task.work(0);
2046   }
2047   g1h->check_gc_time_stamps();
2048 
2049   if (!cleanup_list_is_empty()) {
2050     // The cleanup list is not empty, so we'll have to process it
2051     // concurrently. Notify anyone else that might be wanting free
2052     // regions that there will be more free regions coming soon.
2053     g1h->set_free_regions_coming();
2054   }
2055 
2056   // Rem set scrubbing must happen before the record_concurrent_mark_cleanup_end()
2057   // call below, since it affects the metric by which we sort the heap regions.
2058   if (G1ScrubRemSets) {
2059     double rs_scrub_start = os::elapsedTime();
2060     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2061     if (G1CollectedHeap::use_parallel_gc_threads()) {
2062       g1h->set_par_threads((int)n_workers);
2063       g1h->workers()->run_task(&g1_par_scrub_rs_task);
2064       g1h->set_par_threads(0);
2065 
2066       assert(g1h->check_heap_region_claim_values(
2067                                             HeapRegion::ScrubRemSetClaimValue),
2068              "sanity check");
2069     } else {
2070       g1_par_scrub_rs_task.work(0);
2071     }
2072 
2073     double rs_scrub_end = os::elapsedTime();
2074     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2075     _total_rs_scrub_time += this_rs_scrub_time;
2076   }
2077 
2078   // this will also free any regions totally full of garbage objects,
2079   // and sort the regions.
2080   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2081 
2082   // Statistics.
2083   double end = os::elapsedTime();
2084   _cleanup_times.add((end - start) * 1000.0);
2085 
2086   if (G1Log::fine()) {
2087     g1h->print_size_transition(gclog_or_tty,
2088                                start_used_bytes,
2089                                g1h->used(),
2090                                g1h->capacity());
2091   }
2092 
2093   // Clean up will have freed any regions completely full of garbage.
2094   // Update the soft reference policy with the new heap occupancy.
2095   Universe::update_heap_info_at_gc();
2096 
2097   // We need to make this be a "collection" so any collection pause that
2098   // races with it goes around and waits for completeCleanup to finish.
2099   g1h->increment_total_collections();
2100 
2101   // We reclaimed old regions so we should calculate the sizes to make
2102   // sure we update the old gen/space data.
2103   g1h->g1mm()->update_sizes();
2104 
2105   if (VerifyDuringGC) {
2106     HandleMark hm;  // handle scope
2107     gclog_or_tty->print(" VerifyDuringGC:(after)");
2108     Universe::heap()->prepare_for_verify();
2109     Universe::verify(/* silent */ false,
2110                      /* option */ VerifyOption_G1UsePrevMarking);
2111   }
2112 
2113   g1h->verify_region_sets_optional();
2114 }
2115 
2116 void ConcurrentMark::completeCleanup() {
2117   if (has_aborted()) return;
2118 
2119   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2120 
2121   _cleanup_list.verify_optional();
2122   FreeRegionList tmp_free_list("Tmp Free List");
2123 
2124   if (G1ConcRegionFreeingVerbose) {
2125     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2126                            "cleanup list has %u entries",
2127                            _cleanup_list.length());
2128   }
2129 
2130   // No one else should be accessing the _cleanup_list at this point,
2131   // so it's not necessary to take any locks.
2132   while (!_cleanup_list.is_empty()) {
2133     HeapRegion* hr = _cleanup_list.remove_head();
2134     assert(hr != NULL, "the list was not empty");
2135     hr->par_clear();
2136     tmp_free_list.add_as_tail(hr);
2137 
2138     // Instead of adding one region at a time to the secondary_free_list,
2139     // we accumulate them in the local list and move them a few at a
2140     // time. This also cuts down on the number of notify_all() calls
2141     // we do during this process. We'll also append the local list when
2142     // _cleanup_list is empty (which means we just removed the last
2143     // region from the _cleanup_list).
2144     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2145         _cleanup_list.is_empty()) {
2146       if (G1ConcRegionFreeingVerbose) {
2147         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2148                                "appending %u entries to the secondary_free_list, "
2149                                "cleanup list still has %u entries",
2150                                tmp_free_list.length(),
2151                                _cleanup_list.length());
2152       }
2153 
2154       {
2155         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2156         g1h->secondary_free_list_add_as_tail(&tmp_free_list);
2157         SecondaryFreeList_lock->notify_all();
2158       }
2159 
2160       if (G1StressConcRegionFreeing) {
2161         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2162           os::sleep(Thread::current(), (jlong) 1, false);
2163         }
2164       }
2165     }
2166   }
2167   assert(tmp_free_list.is_empty(), "post-condition");
2168 }
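
// A sketch (hypothetical predicate, not VM code) of the batching policy in
// completeCleanup() above: freed regions are staged on a local list and
// appended to the shared secondary free list (with its notify_all()) only
// once every G1SecondaryFreeListAppendLength regions, or when the cleanup
// list has been exhausted.
static bool sketch_should_append_batch(uint staged_regions,
                                       bool cleanup_list_empty) {
  return (staged_regions % G1SecondaryFreeListAppendLength == 0) ||
         cleanup_list_empty;
}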
2169 
2170 // Support closures for reference processing in G1
2171 
2172 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2173   HeapWord* addr = (HeapWord*)obj;
2174   return addr != NULL &&
2175          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2176 }
2177 
2178 class G1CMKeepAliveClosure: public ExtendedOopClosure {
2179   G1CollectedHeap* _g1;
2180   ConcurrentMark*  _cm;
2181  public:
2182   G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm) :
2183     _g1(g1), _cm(cm) {
2184     assert(Thread::current()->is_VM_thread(), "otherwise fix worker id");
2185   }
2186 
2187   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2188   virtual void do_oop(      oop* p) { do_oop_work(p); }
2189 
2190   template <class T> void do_oop_work(T* p) {
2191     oop obj = oopDesc::load_decode_heap_oop(p);
2192     HeapWord* addr = (HeapWord*)obj;
2193 
2194     if (_cm->verbose_high()) {
2195       gclog_or_tty->print_cr("\t[0] we're looking at location "
2196                              "*"PTR_FORMAT" = "PTR_FORMAT,
2197                              p, (void*) obj);
2198     }
2199 
2200     if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) {
2201       _cm->mark_and_count(obj);
2202       _cm->mark_stack_push(obj);
2203     }
2204   }
2205 };
2206 
2207 class G1CMDrainMarkingStackClosure: public VoidClosure {
2208   ConcurrentMark*               _cm;
2209   CMMarkStack*                  _markStack;
2210   G1CMKeepAliveClosure*         _oopClosure;
2211  public:
2212   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMMarkStack* markStack,
2213                                G1CMKeepAliveClosure* oopClosure) :
2214     _cm(cm),
2215     _markStack(markStack),
2216     _oopClosure(oopClosure) { }
2217 
2218   void do_void() {
2219     _markStack->drain(_oopClosure, _cm->nextMarkBitMap(), false);
2220   }
2221 };
2222 
2223 // 'Keep Alive' closure used by parallel reference processing.
2224 // An instance of this closure is used in the parallel reference processing
2225 // code rather than an instance of G1CMKeepAliveClosure. We could have used
2226 // G1CMKeepAliveClosure as it is MT-safe. Also, reference objects are only
2227 // placed onto the discovered ref lists once, so we can mark and push with
2228 // no need to check whether the object has already been marked. Using
2229 // G1CMKeepAliveClosure would mean, however, having all the worker threads
2230 // operating on the global mark stack. This means that an individual
2231 // worker would be doing lock-free pushes while it processes its own
2232 // discovered ref list followed by a drain call. If the discovered ref lists
2233 // are unbalanced then this could cause interference with the other
2234 // workers. Using a CMTask (and its embedded local data structures)
2235 // avoids that potential interference.
2236 class G1CMParKeepAliveAndDrainClosure: public OopClosure {
2237   ConcurrentMark*  _cm;
2238   CMTask*          _task;
2239   int              _ref_counter_limit;
2240   int              _ref_counter;
2241  public:
2242   G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) :
2243     _cm(cm), _task(task),
2244     _ref_counter_limit(G1RefProcDrainInterval) {
2245     assert(_ref_counter_limit > 0, "sanity");
2246     _ref_counter = _ref_counter_limit;
2247   }
2248 
2249   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2250   virtual void do_oop(      oop* p) { do_oop_work(p); }
2251 
2252   template <class T> void do_oop_work(T* p) {
2253     if (!_cm->has_overflown()) {
2254       oop obj = oopDesc::load_decode_heap_oop(p);
2255       if (_cm->verbose_high()) {
2256         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2257                                "*"PTR_FORMAT" = "PTR_FORMAT,
2258                                _task->worker_id(), p, (void*) obj);
2259       }
2260 
2261       _task->deal_with_reference(obj);
2262       _ref_counter--;
2263 
2264       if (_ref_counter == 0) {
2265         // We have dealt with _ref_counter_limit references, pushing them and objects
2266         // reachable from them on to the local stack (and possibly the global stack).
2267         // Call do_marking_step() to process these entries. We call the routine in a
2268         // loop, which we'll exit if there's nothing more to do (i.e. we're done
2269         // with the entries that we've pushed as a result of the deal_with_reference
2270         // calls above) or we overflow.
2271         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
2272         // while there may still be some work to do. (See the comment at the
2273         // beginning of CMTask::do_marking_step() for those conditions - one of which
2274         // is reaching the specified time target.) It is only when
2275         // CMTask::do_marking_step() returns without setting the has_aborted() flag
2276         // that the marking has completed.
2277         do {
2278           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2279           _task->do_marking_step(mark_step_duration_ms,
2280                                  false /* do_stealing    */,
2281                                  false /* do_termination */);
2282         } while (_task->has_aborted() && !_cm->has_overflown());
2283         _ref_counter = _ref_counter_limit;
2284       }
2285     } else {
2286       if (_cm->verbose_high()) {
2287          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2288       }
2289     }
2290   }
2291 };
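
// A sketch (hypothetical helper, not VM code) of the drain-interval logic in
// G1CMParKeepAliveAndDrainClosure above: after every G1RefProcDrainInterval
// references the worker pauses reference processing and drains the marking
// data structures via do_marking_step() before continuing.
static bool sketch_should_drain(int* ref_counter, int limit) {
  *ref_counter -= 1;
  if (*ref_counter == 0) {
    *ref_counter = limit; // reset for the next batch of references
    return true;          // time to drain the local/global mark stacks
  }
  return false;
}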
2292 
2293 class G1CMParDrainMarkingStackClosure: public VoidClosure {
2294   ConcurrentMark* _cm;
2295   CMTask* _task;
2296  public:
2297   G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) :
2298     _cm(cm), _task(task) { }
2299 
2300   void do_void() {
2301     do {
2302       if (_cm->verbose_high()) {
2303         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step",
2304                                _task->worker_id());
2305       }
2306 
2307       // We call CMTask::do_marking_step() to completely drain the local and
2308       // global marking stacks. The routine is called in a loop, which we'll
2309       // exit if there's nothing more to do (i.e. we've completely drained the
2310       // entries that were pushed as a result of applying the
2311       // G1CMParKeepAliveAndDrainClosure to the entries on the discovered ref
2312       // lists above) or we overflow the global marking stack.
2313       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
2314       // while there may still be some work to do. (See the comment at the
2315       // beginning of CMTask::do_marking_step() for those conditions - one of which
2316       // is reaching the specified time target.) It is only when
2317       // CMTask::do_marking_step() returns without setting the has_aborted() flag
2318       // that the marking has completed.
2319 
2320       _task->do_marking_step(1000000000.0 /* something very large */,
2321                              true /* do_stealing    */,
2322                              true /* do_termination */);
2323     } while (_task->has_aborted() && !_cm->has_overflown());
2324   }
2325 };
2326 
2327 // Implementation of AbstractRefProcTaskExecutor for parallel
2328 // reference processing at the end of G1 concurrent marking
2329 
2330 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2331 private:
2332   G1CollectedHeap* _g1h;
2333   ConcurrentMark*  _cm;
2334   WorkGang*        _workers;
2335   int              _active_workers;
2336 
2337 public:
2338   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2339                           ConcurrentMark* cm,
2340                           WorkGang* workers,
2341                           int n_workers) :
2342     _g1h(g1h), _cm(cm),
2343     _workers(workers), _active_workers(n_workers) { }
2344 
2345   // Executes the given task using concurrent marking worker threads.
2346   virtual void execute(ProcessTask& task);
2347   virtual void execute(EnqueueTask& task);
2348 };
2349 
2350 class G1CMRefProcTaskProxy: public AbstractGangTask {
2351   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2352   ProcessTask&     _proc_task;
2353   G1CollectedHeap* _g1h;
2354   ConcurrentMark*  _cm;
2355 
2356 public:
2357   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2358                        G1CollectedHeap* g1h,
2359                        ConcurrentMark* cm) :
2360     AbstractGangTask("Process reference objects in parallel"),
2361     _proc_task(proc_task), _g1h(g1h), _cm(cm) { }
2362 
2363   virtual void work(uint worker_id) {
2364     CMTask* marking_task = _cm->task(worker_id);
2365     G1CMIsAliveClosure g1_is_alive(_g1h);
2366     G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
2367     G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
2368 
2369     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2370   }
2371 };
2372 
2373 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2374   assert(_workers != NULL, "Need parallel worker threads.");
2375 
2376   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2377 
2378   // We need to reset the phase for each task execution so that
2379   // the termination protocol of CMTask::do_marking_step works.
2380   _cm->set_phase(_active_workers, false /* concurrent */);
2381   _g1h->set_par_threads(_active_workers);
2382   _workers->run_task(&proc_task_proxy);
2383   _g1h->set_par_threads(0);
2384 }
2385 
2386 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2387   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2388   EnqueueTask& _enq_task;
2389 
2390 public:
2391   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2392     AbstractGangTask("Enqueue reference objects in parallel"),
2393     _enq_task(enq_task) { }
2394 
2395   virtual void work(uint worker_id) {
2396     _enq_task.work(worker_id);
2397   }
2398 };
2399 
2400 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2401   assert(_workers != NULL, "Need parallel worker threads.");
2402 
2403   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2404 
2405   _g1h->set_par_threads(_active_workers);
2406   _workers->run_task(&enq_task_proxy);
2407   _g1h->set_par_threads(0);
2408 }
2409 
2410 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2411   ResourceMark rm;
2412   HandleMark   hm;
2413 
2414   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2415 
2416   // Is alive closure.
2417   G1CMIsAliveClosure g1_is_alive(g1h);
2418 
2419   // Inner scope to exclude the cleaning of the string and symbol
2420   // tables from the displayed time.
2421   {
2422     if (G1Log::finer()) {
2423       gclog_or_tty->put(' ');
2424     }
2425     TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
2426 
2427     ReferenceProcessor* rp = g1h->ref_processor_cm();
2428 
2429     // See the comment in G1CollectedHeap::ref_processing_init()
2430     // about how reference processing currently works in G1.
2431 
2432     // Process weak references.
2433     rp->setup_policy(clear_all_soft_refs);
2434     assert(_markStack.isEmpty(), "mark stack should be empty");
2435 
2436     G1CMKeepAliveClosure g1_keep_alive(g1h, this);
2437     G1CMDrainMarkingStackClosure
2438       g1_drain_mark_stack(this, &_markStack, &g1_keep_alive);
2439 
2440     // We use the work gang from the G1CollectedHeap and we utilize all
2441     // the worker threads.
2442     uint active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1U;
2443     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2444 
2445     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2446                                               g1h->workers(), active_workers);
2447 
2448     if (rp->processing_is_mt()) {
2449       // Set the degree of MT here.  If the discovery is done MT, there
2450       // may have been a different number of threads doing the discovery
2451       // and a different number of discovered lists may have Ref objects.
2452       // That is OK as long as the Reference lists are balanced (see
2453       // balance_all_queues() and balance_queues()).
2454       rp->set_active_mt_degree(active_workers);
2455 
2456       rp->process_discovered_references(&g1_is_alive,
2457                                         &g1_keep_alive,
2458                                         &g1_drain_mark_stack,
2459                                         &par_task_executor);
2460 
2461       // The work routines of the parallel keep_alive and drain_marking_stack
2462       // will set the has_overflown flag if we overflow the global marking
2463       // stack.
2464     } else {
2465       rp->process_discovered_references(&g1_is_alive,
2466                                         &g1_keep_alive,
2467                                         &g1_drain_mark_stack,
2468                                         NULL);
2469     }
2470 
2471     assert(_markStack.overflow() || _markStack.isEmpty(),
2472             "mark stack should be empty (unless it overflowed)");
2473     if (_markStack.overflow()) {
2474       // Should have been done already when we tried to push an
2475       // entry on to the global mark stack. But let's do it again.
2476       set_has_overflown();
2477     }
2478 
2479     if (rp->processing_is_mt()) {
2480       assert(rp->num_q() == active_workers, "queue count should match active workers");
2481       rp->enqueue_discovered_references(&par_task_executor);
2482     } else {
2483       rp->enqueue_discovered_references();
2484     }
2485 
2486     rp->verify_no_references_recorded();
2487     assert(!rp->discovery_enabled(), "Post condition");
2488   }
2489 
2490   // Now clean up stale oops in StringTable
2491   StringTable::unlink(&g1_is_alive);
2492   // Clean up unreferenced symbols in symbol table.
2493   SymbolTable::unlink();
2494 }
2495 
2496 void ConcurrentMark::swapMarkBitMaps() {
2497   CMBitMapRO* temp = _prevMarkBitMap;
2498   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2499   _nextMarkBitMap  = (CMBitMap*)  temp;
2500 }
2501 
2502 class CMRemarkTask: public AbstractGangTask {
2503 private:
2504   ConcurrentMark *_cm;
2505 
2506 public:
2507   void work(uint worker_id) {
2508     // Since all available tasks are actually started, we should
2509     // only proceed if we're supposed to be active.
2510     if (worker_id < _cm->active_tasks()) {
2511       CMTask* task = _cm->task(worker_id);
2512       task->record_start_time();
2513       do {
2514         task->do_marking_step(1000000000.0 /* something very large */,
2515                               true /* do_stealing    */,
2516                               true /* do_termination */);
2517       } while (task->has_aborted() && !_cm->has_overflown());
2518       // If we overflow, then we do not want to restart. We instead
2519       // want to abort remark and do concurrent marking again.
2520       task->record_end_time();
2521     }
2522   }
2523 
2524   CMRemarkTask(ConcurrentMark* cm, int active_workers) :
2525     AbstractGangTask("Par Remark"), _cm(cm) {
2526     _cm->terminator()->reset_for_reuse(active_workers);
2527   }
2528 };
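
// A sketch (hypothetical helper, not VM code) of the driving loop used by
// CMRemarkTask::work() above: keep calling do_marking_step() with an
// effectively unbounded time slice until it completes without aborting,
// unless the global mark stack overflowed (in which case remark is
// abandoned and concurrent marking will be restarted).
static void sketch_drive_remark_task(CMTask* task, ConcurrentMark* cm) {
  do {
    task->do_marking_step(1000000000.0 /* something very large */,
                          true /* do_stealing    */,
                          true /* do_termination */);
  } while (task->has_aborted() && !cm->has_overflown());
}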
2529 
2530 void ConcurrentMark::checkpointRootsFinalWork() {
2531   ResourceMark rm;
2532   HandleMark   hm;
2533   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2534 
2535   g1h->ensure_parsability(false);
2536 
2537   if (G1CollectedHeap::use_parallel_gc_threads()) {
2538     G1CollectedHeap::StrongRootsScope srs(g1h);
2539     // this is remark, so we'll use up all active threads
2540     uint active_workers = g1h->workers()->active_workers();
2541     if (active_workers == 0) {
2542       assert(active_workers > 0, "Should have been set earlier");
2543       active_workers = (uint) ParallelGCThreads;
2544       g1h->workers()->set_active_workers(active_workers);
2545     }
2546     set_phase(active_workers, false /* concurrent */);
2547     // Leave _parallel_marking_threads at its
2548     // value originally calculated in the ConcurrentMark
2549     // constructor and pass values of the active workers
2550     // through the gang in the task.
2551 
2552     CMRemarkTask remarkTask(this, active_workers);
2553     g1h->set_par_threads(active_workers);
2554     g1h->workers()->run_task(&remarkTask);
2555     g1h->set_par_threads(0);
2556   } else {
2557     G1CollectedHeap::StrongRootsScope srs(g1h);
2558     // this is remark, so we'll use up all available threads
2559     uint active_workers = 1;
2560     set_phase(active_workers, false /* concurrent */);
2561 
2562     CMRemarkTask remarkTask(this, active_workers);
2563     // There are no worker threads to start in the serial case; we
2564     // simply run the task directly on the calling thread.
2566     remarkTask.work(0);
2567   }
2568   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2569   guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant");
2570 
2571   print_stats();
2572 
2573 #if VERIFY_OBJS_PROCESSED
2574   if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) {
2575     gclog_or_tty->print_cr("Processed = %d, enqueued = %d.",
2576                            _scan_obj_cl.objs_processed,
2577                            ThreadLocalObjQueue::objs_enqueued);
2578     guarantee(_scan_obj_cl.objs_processed ==
2579               ThreadLocalObjQueue::objs_enqueued,
2580               "Different number of objs processed and enqueued.");
2581   }
2582 #endif
2583 }
2584 
2585 #ifndef PRODUCT
2586 
2587 class PrintReachableOopClosure: public OopClosure {
2588 private:
2589   G1CollectedHeap* _g1h;
2590   outputStream*    _out;
2591   VerifyOption     _vo;
2592   bool             _all;
2593 
2594 public:
2595   PrintReachableOopClosure(outputStream* out,
2596                            VerifyOption  vo,
2597                            bool          all) :
2598     _g1h(G1CollectedHeap::heap()),
2599     _out(out), _vo(vo), _all(all) { }
2600 
2601   void do_oop(narrowOop* p) { do_oop_work(p); }
2602   void do_oop(      oop* p) { do_oop_work(p); }
2603 
2604   template <class T> void do_oop_work(T* p) {
2605     oop         obj = oopDesc::load_decode_heap_oop(p);
2606     const char* str = NULL;
2607     const char* str2 = "";
2608 
2609     if (obj == NULL) {
2610       str = "";
2611     } else if (!_g1h->is_in_g1_reserved(obj)) {
2612       str = " O";
2613     } else {
2614       HeapRegion* hr  = _g1h->heap_region_containing(obj);
2615       guarantee(hr != NULL, "invariant");
2616       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2617       bool marked = _g1h->is_marked(obj, _vo);
2618 
2619       if (over_tams) {
2620         str = " >";
2621         if (marked) {
2622           str2 = " AND MARKED";
2623         }
2624       } else if (marked) {
2625         str = " M";
2626       } else {
2627         str = " NOT";
2628       }
2629     }
2630 
2631     _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
2632                    p, (void*) obj, str, str2);
2633   }
2634 };
2635 
2636 class PrintReachableObjectClosure : public ObjectClosure {
2637 private:
2638   G1CollectedHeap* _g1h;
2639   outputStream*    _out;
2640   VerifyOption     _vo;
2641   bool             _all;
2642   HeapRegion*      _hr;
2643 
2644 public:
2645   PrintReachableObjectClosure(outputStream* out,
2646                               VerifyOption  vo,
2647                               bool          all,
2648                               HeapRegion*   hr) :
2649     _g1h(G1CollectedHeap::heap()),
2650     _out(out), _vo(vo), _all(all), _hr(hr) { }
2651 
2652   void do_object(oop o) {
2653     bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2654     bool marked = _g1h->is_marked(o, _vo);
2655     bool print_it = _all || over_tams || marked;
2656 
2657     if (print_it) {
2658       _out->print_cr(" "PTR_FORMAT"%s",
2659                      o, (over_tams) ? " >" : (marked) ? " M" : "");
2660       PrintReachableOopClosure oopCl(_out, _vo, _all);
2661       o->oop_iterate_no_header(&oopCl);
2662     }
2663   }
2664 };
2665 
2666 class PrintReachableRegionClosure : public HeapRegionClosure {
2667 private:
2668   G1CollectedHeap* _g1h;
2669   outputStream*    _out;
2670   VerifyOption     _vo;
2671   bool             _all;
2672 
2673 public:
2674   bool doHeapRegion(HeapRegion* hr) {
2675     HeapWord* b = hr->bottom();
2676     HeapWord* e = hr->end();
2677     HeapWord* t = hr->top();
2678     HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2679     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2680                    "TAMS: "PTR_FORMAT, b, e, t, p);
2681     _out->cr();
2682 
2683     HeapWord* from = b;
2684     HeapWord* to   = t;
2685 
2686     if (to > from) {
2687       _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
2688       _out->cr();
2689       PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2690       hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2691       _out->cr();
2692     }
2693 
2694     return false;
2695   }
2696 
2697   PrintReachableRegionClosure(outputStream* out,
2698                               VerifyOption  vo,
2699                               bool          all) :
2700     _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2701 };
2702 
2703 void ConcurrentMark::print_reachable(const char* str,
2704                                      VerifyOption vo,
2705                                      bool all) {
2706   gclog_or_tty->cr();
2707   gclog_or_tty->print_cr("== Doing heap dump... ");
2708 
2709   if (G1PrintReachableBaseFile == NULL) {
2710     gclog_or_tty->print_cr("  #### error: no base file defined");
2711     return;
2712   }
2713 
2714   if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2715       (JVM_MAXPATHLEN - 1)) {
2716     gclog_or_tty->print_cr("  #### error: file name too long");
2717     return;
2718   }
2719 
2720   char file_name[JVM_MAXPATHLEN];
2721   sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2722   gclog_or_tty->print_cr("  dumping to file %s", file_name);
2723 
2724   fileStream fout(file_name);
2725   if (!fout.is_open()) {
2726     gclog_or_tty->print_cr("  #### error: could not open file");
2727     return;
2728   }
2729 
2730   outputStream* out = &fout;
2731   out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2732   out->cr();
2733 
2734   out->print_cr("--- ITERATING OVER REGIONS");
2735   out->cr();
2736   PrintReachableRegionClosure rcl(out, vo, all);
2737   _g1h->heap_region_iterate(&rcl);
2738   out->cr();
2739 
2740   gclog_or_tty->print_cr("  done");
2741   gclog_or_tty->flush();
2742 }
2743 
2744 #endif // PRODUCT
2745 
2746 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2747   // Note we are overriding the read-only view of the prev map here, via
2748   // the cast.
2749   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2750 }
2751 
2752 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2753   _nextMarkBitMap->clearRange(mr);
2754 }
2755 
2756 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
2757   clearRangePrevBitmap(mr);
2758   clearRangeNextBitmap(mr);
2759 }
2760 
2761 HeapRegion*
2762 ConcurrentMark::claim_region(uint worker_id) {
2763   // "checkpoint" the finger
2764   HeapWord* finger = _finger;
2765 
2766   // _heap_end will not change underneath our feet; it only changes at
2767   // yield points.
2768   while (finger < _heap_end) {
2769     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2770 
2771     // Note on how this code handles humongous regions. In the
2772     // normal case the finger will reach the start of a "starts
2773     // humongous" (SH) region. Its end will either be the end of the
2774     // last "continues humongous" (CH) region in the sequence, or the
2775     // standard end of the SH region (if the SH is the only region in
2776     // the sequence). That way claim_region() will skip over the CH
2777     // regions. However, there is a subtle race between a CM thread
2778     // executing this method and a mutator thread doing a humongous
2779     // object allocation. The two are not mutually exclusive as the CM
2780     // thread does not need to hold the Heap_lock when it gets
2781     // here. So there is a chance that claim_region() will come across
2782     // a free region that's in the process of becoming a SH or a CH
2783     // region. In the former case, it will either
2784     //   a) Miss the update to the region's end, in which case it will
2785     //      visit every subsequent CH region, will find their bitmaps
2786     //      empty, and do nothing, or
2787     //   b) Will observe the update of the region's end (in which case
2788     //      it will skip the subsequent CH regions).
2789     // If it comes across a region that suddenly becomes CH, the
2790     // scenario will be similar to b). So, the race between
2791     // claim_region() and a humongous object allocation might force us
2792     // to do a bit of unnecessary work (due to some unnecessary bitmap
2793     // iterations) but it should not introduce any correctness issues.
2794     HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
2795     HeapWord*   bottom        = curr_region->bottom();
2796     HeapWord*   end           = curr_region->end();
2797     HeapWord*   limit         = curr_region->next_top_at_mark_start();
2798 
2799     if (verbose_low()) {
2800       gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2801                              "["PTR_FORMAT", "PTR_FORMAT"), "
2802                              "limit = "PTR_FORMAT,
2803                              worker_id, curr_region, bottom, end, limit);
2804     }
2805 
2806     // Is the gap between reading the finger and doing the CAS too long?
2807     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2808     if (res == finger) {
2809       // we succeeded
2810 
2811       // notice that _finger == end cannot be guaranteed here since
2812       // someone else might have moved the finger even further
2813       assert(_finger >= end, "the finger should have moved forward");
2814 
2815       if (verbose_low()) {
2816         gclog_or_tty->print_cr("[%u] we were successful with region = "
2817                                PTR_FORMAT, worker_id, curr_region);
2818       }
2819 
2820       if (limit > bottom) {
2821         if (verbose_low()) {
2822           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2823                                  "returning it ", worker_id, curr_region);
2824         }
2825         return curr_region;
2826       } else {
2827         assert(limit == bottom,
2828                "the region limit should be at bottom");
2829         if (verbose_low()) {
2830           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2831                                  "returning NULL", worker_id, curr_region);
2832         }
2833         // we return NULL and the caller should try calling
2834         // claim_region() again.
2835         return NULL;
2836       }
2837     } else {
2838       assert(_finger > finger, "the finger should have moved forward");
2839       if (verbose_low()) {
2840         gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2841                                "global finger = "PTR_FORMAT", "
2842                                "our finger = "PTR_FORMAT,
2843                                worker_id, _finger, finger);
2844       }
2845 
2846       // read it again
2847       finger = _finger;
2848     }
2849   }
2850 
2851   return NULL;
2852 }
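
     // A minimal usage sketch of the protocol above (hypothetical caller;
     // the real driver is CMTask::do_marking_step() below). A NULL result
     // can mean either "claimed an empty region, retry" or "no regions
     // left", so a caller must check out_of_regions() to tell the two
     // apart:
     //
     //   HeapRegion* hr = NULL;
     //   while (hr == NULL && !_cm->out_of_regions()) {
     //     hr = _cm->claim_region(worker_id);
     //   }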
2853 
2854 #ifndef PRODUCT
2855 enum VerifyNoCSetOopsPhase {
2856   VerifyNoCSetOopsStack,
2857   VerifyNoCSetOopsQueues,
2858   VerifyNoCSetOopsSATBCompleted,
2859   VerifyNoCSetOopsSATBThread
2860 };
2861 
2862 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
2863 private:
2864   G1CollectedHeap* _g1h;
2865   VerifyNoCSetOopsPhase _phase;
2866   int _info;
2867 
2868   const char* phase_str() {
2869     switch (_phase) {
2870     case VerifyNoCSetOopsStack:         return "Stack";
2871     case VerifyNoCSetOopsQueues:        return "Queue";
2872     case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
2873     case VerifyNoCSetOopsSATBThread:    return "Thread SATB Buffers";
2874     default:                            ShouldNotReachHere();
2875     }
2876     return NULL;
2877   }
2878 
2879   void do_object_work(oop obj) {
2880     guarantee(!_g1h->obj_in_cs(obj),
2881               err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
2882                       (void*) obj, phase_str(), _info));
2883   }
2884 
2885 public:
2886   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2887 
2888   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
2889     _phase = phase;
2890     _info = info;
2891   }
2892 
2893   virtual void do_oop(oop* p) {
2894     oop obj = oopDesc::load_decode_heap_oop(p);
2895     do_object_work(obj);
2896   }
2897 
2898   virtual void do_oop(narrowOop* p) {
2899     // We should not come across narrow oops while scanning marking
2900     // stacks and SATB buffers.
2901     ShouldNotReachHere();
2902   }
2903 
2904   virtual void do_object(oop obj) {
2905     do_object_work(obj);
2906   }
2907 };
2908 
2909 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
2910                                          bool verify_enqueued_buffers,
2911                                          bool verify_thread_buffers,
2912                                          bool verify_fingers) {
2913   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2914   if (!G1CollectedHeap::heap()->mark_in_progress()) {
2915     return;
2916   }
2917 
2918   VerifyNoCSetOopsClosure cl;
2919 
2920   if (verify_stacks) {
2921     // Verify entries on the global mark stack
2922     cl.set_phase(VerifyNoCSetOopsStack);
2923     _markStack.oops_do(&cl);
2924 
2925     // Verify entries on the task queues
2926     for (uint i = 0; i < _max_worker_id; i += 1) {
2927       cl.set_phase(VerifyNoCSetOopsQueues, i);
2928       CMTaskQueue* queue = _task_queues->queue(i);
2929       queue->oops_do(&cl);
2930     }
2931   }
2932 
2933   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
2934 
2935   // Verify entries on the enqueued SATB buffers
2936   if (verify_enqueued_buffers) {
2937     cl.set_phase(VerifyNoCSetOopsSATBCompleted);
2938     satb_qs.iterate_completed_buffers_read_only(&cl);
2939   }
2940 
2941   // Verify entries on the per-thread SATB buffers
2942   if (verify_thread_buffers) {
2943     cl.set_phase(VerifyNoCSetOopsSATBThread);
2944     satb_qs.iterate_thread_buffers_read_only(&cl);
2945   }
2946 
2947   if (verify_fingers) {
2948     // Verify the global finger
2949     HeapWord* global_finger = finger();
2950     if (global_finger != NULL && global_finger < _heap_end) {
2951       // The global finger always points to a heap region boundary. We
2952       // use heap_region_containing_raw() to get the containing region
2953       // given that the global finger could be pointing to a free region
2954       // which subsequently becomes continues humongous. If that
2955       // happens, heap_region_containing() will return the bottom of the
2956       // corresponding starts humongous region and the check below will
2957       // not hold any more.
2958       HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2959       guarantee(global_finger == global_hr->bottom(),
2960                 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
2961                         global_finger, HR_FORMAT_PARAMS(global_hr)));
2962     }
2963 
2964     // Verify the task fingers
2965     assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2966     for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
2967       CMTask* task = _tasks[i];
2968       HeapWord* task_finger = task->finger();
2969       if (task_finger != NULL && task_finger < _heap_end) {
2970         // See above note on the global finger verification.
2971         HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2972         guarantee(task_finger == task_hr->bottom() ||
2973                   !task_hr->in_collection_set(),
2974                   err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
2975                           task_finger, HR_FORMAT_PARAMS(task_hr)));
2976       }
2977     }
2978   }
2979 }
2980 #endif // PRODUCT
2981 
2982 // Aggregate the counting data that was constructed concurrently
2983 // with marking.
2984 class AggregateCountDataHRClosure: public HeapRegionClosure {
2985   G1CollectedHeap* _g1h;
2986   ConcurrentMark* _cm;
2987   CardTableModRefBS* _ct_bs;
2988   BitMap* _cm_card_bm;
2989   uint _max_worker_id;
2990 
2991  public:
2992   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2993                               BitMap* cm_card_bm,
2994                               uint max_worker_id) :
2995     _g1h(g1h), _cm(g1h->concurrent_mark()),
2996     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
2997     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2998 
2999   bool doHeapRegion(HeapRegion* hr) {
3000     if (hr->continuesHumongous()) {
3001       // We will ignore these here and process them when their
3002       // associated "starts humongous" region is processed.
3003       // Note that we cannot rely on their associated
3004       // "starts humongous" region to have their bit set to 1
3005       // since, due to the region chunking in the parallel region
3006       // iteration, a "continues humongous" region might be visited
3007       // before its associated "starts humongous".
3008       return false;
3009     }
3010 
3011     HeapWord* start = hr->bottom();
3012     HeapWord* limit = hr->next_top_at_mark_start();
3013     HeapWord* end = hr->end();
3014 
3015     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3016            err_msg("Preconditions not met - "
3017                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3018                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
3019                    start, limit, hr->top(), hr->end()));
3020 
3021     assert(hr->next_marked_bytes() == 0, "Precondition");
3022 
3023     if (start == limit) {
3024       // NTAMS of this region has not been set so nothing to do.
3025       return false;
3026     }
3027 
3028     // 'start' should be in the heap.
3029     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3030     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3031     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3032 
3033     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3034     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3035     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3036 
3037     // If ntams is not card aligned then we bump the card bitmap index
3038     // for limit so that we get all the cards spanned by
3039     // the object ending at ntams.
3040     // Note: if this is the last region in the heap then ntams
3041     // could actually be just beyond the end of the heap;
3042     // limit_idx will then correspond to a (non-existent) card
3043     // that is also outside the heap.
3044     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3045       limit_idx += 1;
3046     }
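
         // Worked example (illustrative numbers only): with 512-byte cards,
         // if limit (ntams) falls 200 bytes into the card at bitmap index
         // 105, limit_idx is bumped from 105 to 106 so that the partially
         // covered card 105 is included in the half-open range
         // [start_idx, limit_idx).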
3047 
3048     assert(limit_idx <= end_idx, "or else use atomics");
3049 
3050     // Aggregate the "stripe" in the count data associated with hr.
3051     uint hrs_index = hr->hrs_index();
3052     size_t marked_bytes = 0;
3053 
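         // Layout sketch (inferred from the accessors used below): each
         // worker keeps its own size_t array of marked bytes, indexed by
         // region, plus a private card bitmap. Aggregation sums
         // marked_bytes_array[hrs_index] across workers and ORs each
         // worker's card bits into the global _cm_card_bm.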
3054     for (uint i = 0; i < _max_worker_id; i += 1) {
3055       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3056       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3057 
3058       // Fetch the marked_bytes in this region for task i and
3059       // add it to the running total for this region.
3060       marked_bytes += marked_bytes_array[hrs_index];
3061 
3062       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3063       // into the global card bitmap.
3064       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3065 
3066       while (scan_idx < limit_idx) {
3067         assert(task_card_bm->at(scan_idx) == true, "should be");
3068         _cm_card_bm->set_bit(scan_idx);
3069         assert(_cm_card_bm->at(scan_idx) == true, "should be");
3070 
3071         // BitMap::get_next_one_offset() can handle the case when
3072         // its left_offset parameter is greater than its right_offset
3073         // parameter. It does, however, have an early exit if
3074         // left_offset == right_offset. So let's limit the value
3075         // passed in for left offset here.
3076         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3077         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3078       }
3079     }
3080 
3081     // Update the marked bytes for this region.
3082     hr->add_to_marked_bytes(marked_bytes);
3083 
3084     // Next heap region
3085     return false;
3086   }
3087 };
3088 
3089 class G1AggregateCountDataTask: public AbstractGangTask {
3090 protected:
3091   G1CollectedHeap* _g1h;
3092   ConcurrentMark* _cm;
3093   BitMap* _cm_card_bm;
3094   uint _max_worker_id;
3095   int _active_workers;
3096 
3097 public:
3098   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3099                            ConcurrentMark* cm,
3100                            BitMap* cm_card_bm,
3101                            uint max_worker_id,
3102                            int n_workers) :
3103     AbstractGangTask("Count Aggregation"),
3104     _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3105     _max_worker_id(max_worker_id),
3106     _active_workers(n_workers) { }
3107 
3108   void work(uint worker_id) {
3109     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3110 
3111     if (G1CollectedHeap::use_parallel_gc_threads()) {
3112       _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
3113                                             _active_workers,
3114                                             HeapRegion::AggregateCountClaimValue);
3115     } else {
3116       _g1h->heap_region_iterate(&cl);
3117     }
3118   }
3119 };
3120 
3121 
3122 void ConcurrentMark::aggregate_count_data() {
3123   int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3124                         _g1h->workers()->active_workers() :
3125                         1);
3126 
3127   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3128                                            _max_worker_id, n_workers);
3129 
3130   if (G1CollectedHeap::use_parallel_gc_threads()) {
3131     assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3132            "sanity check");
3133     _g1h->set_par_threads(n_workers);
3134     _g1h->workers()->run_task(&g1_par_agg_task);
3135     _g1h->set_par_threads(0);
3136 
3137     assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
3138            "sanity check");
3139     _g1h->reset_heap_region_claim_values();
3140   } else {
3141     g1_par_agg_task.work(0);
3142   }
3143 }
3144 
3145 // Clear the per-worker arrays used to store the per-region counting data
3146 void ConcurrentMark::clear_all_count_data() {
3147   // Clear the global card bitmap - it will be filled during
3148   // liveness count aggregation (during remark) and the
3149   // final counting task.
3150   _card_bm.clear();
3151 
3152   // Clear the global region bitmap - it will be filled as part
3153   // of the final counting task.
3154   _region_bm.clear();
3155 
3156   uint max_regions = _g1h->max_regions();
3157   assert(_max_worker_id > 0, "uninitialized");
3158 
3159   for (uint i = 0; i < _max_worker_id; i += 1) {
3160     BitMap* task_card_bm = count_card_bitmap_for(i);
3161     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3162 
3163     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3164     assert(marked_bytes_array != NULL, "uninitialized");
3165 
3166     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3167     task_card_bm->clear();
3168   }
3169 }
3170 
3171 void ConcurrentMark::print_stats() {
3172   if (verbose_stats()) {
3173     gclog_or_tty->print_cr("---------------------------------------------------------------------");
3174     for (size_t i = 0; i < _active_tasks; ++i) {
3175       _tasks[i]->print_stats();
3176       gclog_or_tty->print_cr("---------------------------------------------------------------------");
3177     }
3178   }
3179 }
3180 
3181 // abandon current marking iteration due to a Full GC
3182 void ConcurrentMark::abort() {
3183   // Clear all marks to force marking thread to do nothing
3184   _nextMarkBitMap->clearAll();
3185   // Clear the liveness counting data
3186   clear_all_count_data();
3187   // Empty mark stack
3188   reset_marking_state();
3189   for (uint i = 0; i < _max_worker_id; ++i) {
3190     _tasks[i]->clear_region_fields();
3191   }
3192   _has_aborted = true;
3193 
3194   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3195   satb_mq_set.abandon_partial_marking();
3196   // This can be called either during or outside marking; we'll read
3197   // the expected_active value from the SATB queue set.
3198   satb_mq_set.set_active_all_threads(
3199                                  false, /* new active value */
3200                                  satb_mq_set.is_active() /* expected_active */);
3201 }
3202 
3203 static void print_ms_time_info(const char* prefix, const char* name,
3204                                NumberSeq& ns) {
3205   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3206                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3207   if (ns.num() > 0) {
3208     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3209                            prefix, ns.sd(), ns.maximum());
3210   }
3211 }
3212 
3213 void ConcurrentMark::print_summary_info() {
3214   gclog_or_tty->print_cr(" Concurrent marking:");
3215   print_ms_time_info("  ", "init marks", _init_times);
3216   print_ms_time_info("  ", "remarks", _remark_times);
3217   {
3218     print_ms_time_info("     ", "final marks", _remark_mark_times);
3219     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3220 
3221   }
3222   print_ms_time_info("  ", "cleanups", _cleanup_times);
3223   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
3224                          _total_counting_time,
3225                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3226                           (double)_cleanup_times.num()
3227                          : 0.0));
3228   if (G1ScrubRemSets) {
3229     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3230                            _total_rs_scrub_time,
3231                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3232                             (double)_cleanup_times.num()
3233                            : 0.0));
3234   }
3235   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3236                          (_init_times.sum() + _remark_times.sum() +
3237                           _cleanup_times.sum())/1000.0);
3238   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3239                 "(%8.2f s marking).",
3240                 cmThread()->vtime_accum(),
3241                 cmThread()->vtime_mark_accum());
3242 }
3243 
3244 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3245   _parallel_workers->print_worker_threads_on(st);
3246 }
3247 
3248 // We take a break if someone is trying to stop the world.
3249 bool ConcurrentMark::do_yield_check(uint worker_id) {
3250   if (should_yield()) {
3251     if (worker_id == 0) {
3252       _g1h->g1_policy()->record_concurrent_pause();
3253     }
3254     cmThread()->yield();
3255     return true;
3256   } else {
3257     return false;
3258   }
3259 }
3260 
3261 bool ConcurrentMark::should_yield() {
3262   return cmThread()->should_yield();
3263 }
3264 
3265 bool ConcurrentMark::containing_card_is_marked(void* p) {
3266   size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
3267   return _card_bm.at(offset >> CardTableModRefBS::card_shift);
3268 }
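
     // Worked example for containing_card_is_marked() above (illustrative,
     // assuming the usual 512-byte cards, i.e. card_shift == 9): a pointer
     // 0x1300 bytes past the start of the reserved region has byte offset
     // 0x1300, so the bit tested in the card bitmap is 0x1300 >> 9 == 9.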
3269 
3270 bool ConcurrentMark::containing_cards_are_marked(void* start,
3271                                                  void* last) {
3272   return containing_card_is_marked(start) &&
3273          containing_card_is_marked(last);
3274 }
3275 
3276 #ifndef PRODUCT
3277 // for debugging purposes
3278 void ConcurrentMark::print_finger() {
3279   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3280                          _heap_start, _heap_end, _finger);
3281   for (uint i = 0; i < _max_worker_id; ++i) {
3282     gclog_or_tty->print("   %u: "PTR_FORMAT, i, _tasks[i]->finger());
3283   }
3284   gclog_or_tty->print_cr("");
3285 }
3286 #endif
3287 
3288 void CMTask::scan_object(oop obj) {
3289   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3290 
3291   if (_cm->verbose_high()) {
3292     gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3293                            _worker_id, (void*) obj);
3294   }
3295 
3296   size_t obj_size = obj->size();
3297   _words_scanned += obj_size;
3298 
3299   obj->oop_iterate(_cm_oop_closure);
3300   statsOnly( ++_objs_scanned );
3301   check_limits();
3302 }
3303 
3304 // Closure for iteration over bitmaps
3305 class CMBitMapClosure : public BitMapClosure {
3306 private:
3307   // the bitmap that is being iterated over
3308   CMBitMap*                   _nextMarkBitMap;
3309   ConcurrentMark*             _cm;
3310   CMTask*                     _task;
3311 
3312 public:
3313   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3314     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
3315 
3316   bool do_bit(size_t offset) {
3317     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3318     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3319     assert( addr < _cm->finger(), "invariant");
3320 
3321     statsOnly( _task->increase_objs_found_on_bitmap() );
3322     assert(addr >= _task->finger(), "invariant");
3323 
3324     // We move the task's local finger along.
3325     _task->move_finger_to(addr);
3326 
3327     _task->scan_object(oop(addr));
3328     // we only partially drain the local queue and global stack
3329     _task->drain_local_queue(true);
3330     _task->drain_global_stack(true);
3331 
3332     // if the has_aborted flag has been raised, we need to bail out of
3333     // the iteration
3334     return !_task->has_aborted();
3335   }
3336 };
3337 
3338 // Closure for iterating over objects, currently only used for
3339 // processing SATB buffers.
3340 class CMObjectClosure : public ObjectClosure {
3341 private:
3342   CMTask* _task;
3343 
3344 public:
3345   void do_object(oop obj) {
3346     _task->deal_with_reference(obj);
3347   }
3348 
3349   CMObjectClosure(CMTask* task) : _task(task) { }
3350 };
3351 
3352 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3353                                ConcurrentMark* cm,
3354                                CMTask* task)
3355   : _g1h(g1h), _cm(cm), _task(task) {
3356   assert(_ref_processor == NULL, "should be initialized to NULL");
3357 
3358   if (G1UseConcMarkReferenceProcessing) {
3359     _ref_processor = g1h->ref_processor_cm();
3360     assert(_ref_processor != NULL, "should not be NULL");
3361   }
3362 }
3363 
3364 void CMTask::setup_for_region(HeapRegion* hr) {
3365   // Separated the asserts so that we know which one fires.
3366   assert(hr != NULL,
3367         "claim_region() should have filtered out continues humongous regions");
3368   assert(!hr->continuesHumongous(),
3369         "claim_region() should have filtered out continues humongous regions");
3370 
3371   if (_cm->verbose_low()) {
3372     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3373                            _worker_id, hr);
3374   }
3375 
3376   _curr_region  = hr;
3377   _finger       = hr->bottom();
3378   update_region_limit();
3379 }
3380 
3381 void CMTask::update_region_limit() {
3382   HeapRegion* hr            = _curr_region;
3383   HeapWord* bottom          = hr->bottom();
3384   HeapWord* limit           = hr->next_top_at_mark_start();
3385 
3386   if (limit == bottom) {
3387     if (_cm->verbose_low()) {
3388       gclog_or_tty->print_cr("[%u] found an empty region "
3389                              "["PTR_FORMAT", "PTR_FORMAT")",
3390                              _worker_id, bottom, limit);
3391     }
3392     // The region was collected underneath our feet.
3393     // We set the finger to bottom to ensure that the bitmap
3394     // iteration that will follow this will not do anything.
3395     // (this is not a condition that holds when we set the region up,
3396     // as the region is not supposed to be empty in the first place)
3397     _finger = bottom;
3398   } else if (limit >= _region_limit) {
3399     assert(limit >= _finger, "peace of mind");
3400   } else {
3401     assert(limit < _region_limit, "only way to get here");
3402     // This can happen under some pretty unusual circumstances.  An
3403     // evacuation pause empties the region underneath our feet (NTAMS
3404     // at bottom). We then do some allocation in the region (NTAMS
3405     // stays at bottom), followed by the region being used as a GC
3406     // alloc region (NTAMS will move to top() and the objects
3407     // originally below it will be grayed). All objects now marked in
3408     // the region are explicitly grayed, if below the global finger,
3409     // and in fact we do not need to scan anything else. So, we simply
3410     // set _finger to be limit to ensure that the bitmap iteration
3411     // doesn't do anything.
3412     _finger = limit;
3413   }
3414 
3415   _region_limit = limit;
3416 }
3417 
3418 void CMTask::giveup_current_region() {
3419   assert(_curr_region != NULL, "invariant");
3420   if (_cm->verbose_low()) {
3421     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3422                            _worker_id, _curr_region);
3423   }
3424   clear_region_fields();
3425 }
3426 
3427 void CMTask::clear_region_fields() {
3428   // Values for these three fields that indicate that we're not
3429   // holding on to a region.
3430   _curr_region   = NULL;
3431   _finger        = NULL;
3432   _region_limit  = NULL;
3433 }
3434 
3435 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3436   if (cm_oop_closure == NULL) {
3437     assert(_cm_oop_closure != NULL, "invariant");
3438   } else {
3439     assert(_cm_oop_closure == NULL, "invariant");
3440   }
3441   _cm_oop_closure = cm_oop_closure;
3442 }
3443 
3444 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3445   guarantee(nextMarkBitMap != NULL, "invariant");
3446 
3447   if (_cm->verbose_low()) {
3448     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3449   }
3450 
3451   _nextMarkBitMap                = nextMarkBitMap;
3452   clear_region_fields();
3453 
3454   _calls                         = 0;
3455   _elapsed_time_ms               = 0.0;
3456   _termination_time_ms           = 0.0;
3457   _termination_start_time_ms     = 0.0;
3458 
3459 #if _MARKING_STATS_
3460   _local_pushes                  = 0;
3461   _local_pops                    = 0;
3462   _local_max_size                = 0;
3463   _objs_scanned                  = 0;
3464   _global_pushes                 = 0;
3465   _global_pops                   = 0;
3466   _global_max_size               = 0;
3467   _global_transfers_to           = 0;
3468   _global_transfers_from         = 0;
3469   _regions_claimed               = 0;
3470   _objs_found_on_bitmap          = 0;
3471   _satb_buffers_processed        = 0;
3472   _steal_attempts                = 0;
3473   _steals                        = 0;
3474   _aborted                       = 0;
3475   _aborted_overflow              = 0;
3476   _aborted_cm_aborted            = 0;
3477   _aborted_yield                 = 0;
3478   _aborted_timed_out             = 0;
3479   _aborted_satb                  = 0;
3480   _aborted_termination           = 0;
3481 #endif // _MARKING_STATS_
3482 }
3483 
3484 bool CMTask::should_exit_termination() {
3485   regular_clock_call();
3486   // This is called when we are in the termination protocol. We should
3487   // quit if, for some reason, this task wants to abort or the global
3488   // stack is not empty (this means that we can get work from it).
3489   return !_cm->mark_stack_empty() || has_aborted();
3490 }
3491 
3492 void CMTask::reached_limit() {
3493   assert(_words_scanned >= _words_scanned_limit ||
3494          _refs_reached >= _refs_reached_limit,
3495          "shouldn't have been called otherwise");
3496   regular_clock_call();
3497 }
3498 
3499 void CMTask::regular_clock_call() {
3500   if (has_aborted()) return;
3501 
3502   // First, we need to recalculate the words scanned and refs reached
3503   // limits for the next clock call.
3504   recalculate_limits();
3505 
3506   // During the regular clock call we do the following
3507 
3508   // (1) If an overflow has been flagged, then we abort.
3509   if (_cm->has_overflown()) {
3510     set_has_aborted();
3511     return;
3512   }
3513 
3514   // If we are not concurrent (i.e. we're doing remark) we don't need
3515   // to check anything else. The other steps are only needed during
3516   // the concurrent marking phase.
3517   if (!concurrent()) return;
3518 
3519   // (2) If marking has been aborted for Full GC, then we also abort.
3520   if (_cm->has_aborted()) {
3521     set_has_aborted();
3522     statsOnly( ++_aborted_cm_aborted );
3523     return;
3524   }
3525 
3526   double curr_time_ms = os::elapsedVTime() * 1000.0;
3527 
3528   // (3) If marking stats are enabled, then we update the step history.
3529 #if _MARKING_STATS_
3530   if (_words_scanned >= _words_scanned_limit) {
3531     ++_clock_due_to_scanning;
3532   }
3533   if (_refs_reached >= _refs_reached_limit) {
3534     ++_clock_due_to_marking;
3535   }
3536 
3537   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3538   _interval_start_time_ms = curr_time_ms;
3539   _all_clock_intervals_ms.add(last_interval_ms);
3540 
3541   if (_cm->verbose_medium()) {
3542       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3543                         "scanned = %d%s, refs reached = %d%s",
3544                         _worker_id, last_interval_ms,
3545                         _words_scanned,
3546                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3547                         _refs_reached,
3548                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3549   }
3550 #endif // _MARKING_STATS_
3551 
3552   // (4) We check whether we should yield. If we have to, then we abort.
3553   if (_cm->should_yield()) {
3554     // We should yield. To do this we abort the task. The caller is
3555     // responsible for yielding.
3556     set_has_aborted();
3557     statsOnly( ++_aborted_yield );
3558     return;
3559   }
3560 
3561   // (5) We check whether we've reached our time quota. If we have,
3562   // then we abort.
3563   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3564   if (elapsed_time_ms > _time_target_ms) {
3565     set_has_aborted();
3566     _has_timed_out = true;
3567     statsOnly( ++_aborted_timed_out );
3568     return;
3569   }
3570 
3571   // (6) Finally, we check whether there are enough completed SATB
3572   // buffers available for processing. If there are, we abort.
3573   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3574   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3575     if (_cm->verbose_low()) {
3576       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3577                              _worker_id);
3578     }
3579     // We do need to process SATB buffers, so we'll abort and restart
3580     // the marking task to do so.
3581     set_has_aborted();
3582     statsOnly( ++_aborted_satb );
3583     return;
3584   }
3585 }
3586 
3587 void CMTask::recalculate_limits() {
3588   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3589   _words_scanned_limit      = _real_words_scanned_limit;
3590 
3591   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
3592   _refs_reached_limit       = _real_refs_reached_limit;
3593 }
3594 
3595 void CMTask::decrease_limits() {
3596   // This is called when we believe that we're going to do an infrequent
3597   // operation which will increase the per byte scanned cost (i.e. move
3598   // entries to/from the global stack). It basically tries to decrease the
3599   // scanning limit so that the clock is called earlier.
3600 
3601   if (_cm->verbose_medium()) {
3602     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3603   }
3604 
3605   _words_scanned_limit = _real_words_scanned_limit -
3606     3 * words_scanned_period / 4;
3607   _refs_reached_limit  = _real_refs_reached_limit -
3608     3 * refs_reached_period / 4;
3609 }
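
     // For example, if recalculate_limits() set the real limit to
     // _words_scanned + words_scanned_period, decrease_limits() lowers the
     // effective limit to _words_scanned + words_scanned_period / 4, so
     // only a quarter of the normal scanning budget remains before the
     // clock fires again.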
3610 
3611 void CMTask::move_entries_to_global_stack() {
3612   // local array where we'll store the entries that will be popped
3613   // from the local queue
3614   oop buffer[global_stack_transfer_size];
3615 
3616   int n = 0;
3617   oop obj;
3618   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3619     buffer[n] = obj;
3620     ++n;
3621   }
3622 
3623   if (n > 0) {
3624     // we popped at least one entry from the local queue
3625 
3626     statsOnly( ++_global_transfers_to; _local_pops += n );
3627 
3628     if (!_cm->mark_stack_push(buffer, n)) {
3629       if (_cm->verbose_low()) {
3630         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3631                                _worker_id);
3632       }
3633       set_has_aborted();
3634     } else {
3635       // the transfer was successful
3636 
3637       if (_cm->verbose_medium()) {
3638         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3639                                _worker_id, n);
3640       }
3641       statsOnly( int tmp_size = _cm->mark_stack_size();
3642                  if (tmp_size > _global_max_size) {
3643                    _global_max_size = tmp_size;
3644                  }
3645                  _global_pushes += n );
3646     }
3647   }
3648 
3649   // this operation was quite expensive, so decrease the limits
3650   decrease_limits();
3651 }
3652 
3653 void CMTask::get_entries_from_global_stack() {
3654   // local array where we'll store the entries that will be popped
3655   // from the global stack.
3656   oop buffer[global_stack_transfer_size];
3657   int n;
3658   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3659   assert(n <= global_stack_transfer_size,
3660          "we should not pop more than the given limit");
3661   if (n > 0) {
3662     // yes, we did actually pop at least one entry
3663 
3664     statsOnly( ++_global_transfers_from; _global_pops += n );
3665     if (_cm->verbose_medium()) {
3666       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3667                              _worker_id, n);
3668     }
3669     for (int i = 0; i < n; ++i) {
3670       bool success = _task_queue->push(buffer[i]);
3671       // We only call this when the local queue is empty or under a
3672       // given target limit. So, we do not expect this push to fail.
3673       assert(success, "invariant");
3674     }
3675 
3676     statsOnly( int tmp_size = _task_queue->size();
3677                if (tmp_size > _local_max_size) {
3678                  _local_max_size = tmp_size;
3679                }
3680                _local_pushes += n );
3681   }
3682 
3683   // this operation was quite expensive, so decrease the limits
3684   decrease_limits();
3685 }
3686 
3687 void CMTask::drain_local_queue(bool partially) {
3688   if (has_aborted()) return;
3689 
3690   // Decide what the target size is, depending on whether we're going to
3691   // drain it partially (so that other tasks can steal if they run out
3692   // of things to do) or totally (at the very end).
3693   size_t target_size;
3694   if (partially) {
3695     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3696   } else {
3697     target_size = 0;
3698   }
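
       // For instance (illustrative numbers): with a queue that can hold
       // 16K entries and GCDrainStackTargetSize == 64, a partial drain
       // stops once the queue is down to MIN2(16K / 3, 64) == 64 entries,
       // leaving work available for other tasks to steal.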
3699 
3700   if (_task_queue->size() > target_size) {
3701     if (_cm->verbose_high()) {
3702       gclog_or_tty->print_cr("[%u] draining local queue, target size = "SIZE_FORMAT,
3703                              _worker_id, target_size);
3704     }
3705 
3706     oop obj;
3707     bool ret = _task_queue->pop_local(obj);
3708     while (ret) {
3709       statsOnly( ++_local_pops );
3710 
3711       if (_cm->verbose_high()) {
3712         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3713                                (void*) obj);
3714       }
3715 
3716       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3717       assert(!_g1h->is_on_master_free_list(
3718                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3719 
3720       scan_object(obj);
3721 
3722       if (_task_queue->size() <= target_size || has_aborted()) {
3723         ret = false;
3724       } else {
3725         ret = _task_queue->pop_local(obj);
3726       }
3727     }
3728 
3729     if (_cm->verbose_high()) {
3730       gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3731                              _worker_id, _task_queue->size());
3732     }
3733   }
3734 }
3735 
3736 void CMTask::drain_global_stack(bool partially) {
3737   if (has_aborted()) return;
3738 
3739   // We have a policy to drain the local queue before we attempt to
3740   // drain the global stack.
3741   assert(partially || _task_queue->size() == 0, "invariant");
3742 
3743   // Decide what the target size is, depending on whether we're going to
3744   // drain it partially (so that other tasks can steal if they run out
3745   // of things to do) or totally (at the very end).  Notice that,
3746   // because we move entries from the global stack in chunks or
3747   // because another task might be doing the same, we might in fact
3748   // drop below the target. But, this is not a problem.
3749   size_t target_size;
3750   if (partially) {
3751     target_size = _cm->partial_mark_stack_size_target();
3752   } else {
3753     target_size = 0;
3754   }
3755 
3756   if (_cm->mark_stack_size() > target_size) {
3757     if (_cm->verbose_low()) {
3758       gclog_or_tty->print_cr("[%u] draining global_stack, target size "SIZE_FORMAT,
3759                              _worker_id, target_size);
3760     }
3761 
3762     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3763       get_entries_from_global_stack();
3764       drain_local_queue(partially);
3765     }
3766 
3767     if (_cm->verbose_low()) {
3768       gclog_or_tty->print_cr("[%u] drained global stack, size = %d",
3769                              _worker_id, _cm->mark_stack_size());
3770     }
3771   }
3772 }
3773 
3774 // The SATB queue code makes several assumptions about whether to call
3775 // the par or non-par versions of its methods. This is why some of the code is
3776 // replicated. We should really get rid of the single-threaded version
3777 // of the code to simplify things.
3778 void CMTask::drain_satb_buffers() {
3779   if (has_aborted()) return;
3780 
3781   // We set this so that the regular clock knows that we're in the
3782   // middle of draining buffers and doesn't set the abort flag when it
3783   // notices that SATB buffers are available for draining. It'd be
3784   // very counterproductive if it did that. :-)
3785   _draining_satb_buffers = true;
3786 
3787   CMObjectClosure oc(this);
3788   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3789   if (G1CollectedHeap::use_parallel_gc_threads()) {
3790     satb_mq_set.set_par_closure(_worker_id, &oc);
3791   } else {
3792     satb_mq_set.set_closure(&oc);
3793   }
3794 
3795   // This keeps claiming and applying the closure to completed buffers
3796   // until we run out of buffers or we need to abort.
3797   if (G1CollectedHeap::use_parallel_gc_threads()) {
3798     while (!has_aborted() &&
3799            satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
3800       if (_cm->verbose_medium()) {
3801         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3802       }
3803       statsOnly( ++_satb_buffers_processed );
3804       regular_clock_call();
3805     }
3806   } else {
3807     while (!has_aborted() &&
3808            satb_mq_set.apply_closure_to_completed_buffer()) {
3809       if (_cm->verbose_medium()) {
3810         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3811       }
3812       statsOnly( ++_satb_buffers_processed );
3813       regular_clock_call();
3814     }
3815   }
3816 
3817   if (!concurrent() && !has_aborted()) {
3818     // We should only do this during remark.
3819     if (G1CollectedHeap::use_parallel_gc_threads()) {
3820       satb_mq_set.par_iterate_closure_all_threads(_worker_id);
3821     } else {
3822       satb_mq_set.iterate_closure_all_threads();
3823     }
3824   }
3825 
3826   _draining_satb_buffers = false;
3827 
3828   assert(has_aborted() ||
3829          concurrent() ||
3830          satb_mq_set.completed_buffers_num() == 0, "invariant");
3831 
3832   if (G1CollectedHeap::use_parallel_gc_threads()) {
3833     satb_mq_set.set_par_closure(_worker_id, NULL);
3834   } else {
3835     satb_mq_set.set_closure(NULL);
3836   }
3837 
3838   // Again, this was a potentially expensive operation, so decrease the
3839   // limits to get the regular clock call early.
3840   decrease_limits();
3841 }
3842 
3843 void CMTask::print_stats() {
3844   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3845                          _worker_id, _calls);
3846   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3847                          _elapsed_time_ms, _termination_time_ms);
3848   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3849                          _step_times_ms.num(), _step_times_ms.avg(),
3850                          _step_times_ms.sd());
3851   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3852                          _step_times_ms.maximum(), _step_times_ms.sum());
3853 
3854 #if _MARKING_STATS_
3855   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3856                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3857                          _all_clock_intervals_ms.sd());
3858   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
3859                          _all_clock_intervals_ms.maximum(),
3860                          _all_clock_intervals_ms.sum());
3861   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = %d, marking = %d",
3862                          _clock_due_to_scanning, _clock_due_to_marking);
3863   gclog_or_tty->print_cr("  Objects: scanned = %d, found on the bitmap = %d",
3864                          _objs_scanned, _objs_found_on_bitmap);
3865   gclog_or_tty->print_cr("  Local Queue:  pushes = %d, pops = %d, max size = %d",
3866                          _local_pushes, _local_pops, _local_max_size);
3867   gclog_or_tty->print_cr("  Global Stack: pushes = %d, pops = %d, max size = %d",
3868                          _global_pushes, _global_pops, _global_max_size);
3869   gclog_or_tty->print_cr("                transfers to = %d, transfers from = %d",
3870                          _global_transfers_to,_global_transfers_from);
3871   gclog_or_tty->print_cr("  Regions: claimed = %d", _regions_claimed);
3872   gclog_or_tty->print_cr("  SATB buffers: processed = %d", _satb_buffers_processed);
3873   gclog_or_tty->print_cr("  Steals: attempts = %d, successes = %d",
3874                          _steal_attempts, _steals);
3875   gclog_or_tty->print_cr("  Aborted: %d, due to", _aborted);
3876   gclog_or_tty->print_cr("    overflow: %d, global abort: %d, yield: %d",
3877                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3878   gclog_or_tty->print_cr("    time out: %d, SATB: %d, termination: %d",
3879                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3880 #endif // _MARKING_STATS_
3881 }
3882 
3883 /*****************************************************************************
3884 
3885     The do_marking_step(time_target_ms) method is the building block
3886     of the parallel marking framework. It can be called in parallel
3887     with other invocations of do_marking_step() on different tasks
3888     (but only one per task, obviously) and concurrently with the
3889     mutator threads, or during remark, hence it eliminates the need
3890     for two versions of the code. When called during remark, it will
3891     pick up from where the task left off during the concurrent marking
3892     phase. Interestingly, tasks are also claimable during evacuation
3893     pauses, since do_marking_step() ensures that it aborts before
3894     it needs to yield.
3895 
3896     The data structures that it uses to do marking work are the
3897     following:
3898 
3899       (1) Marking Bitmap. If there are gray objects that appear only
3900       on the bitmap (this happens either when dealing with an overflow
3901       or when the initial marking phase has simply marked the roots
3902       and didn't push them on the stack), then tasks claim heap
3903       regions whose bitmap they then scan to find gray objects. A
3904       global finger indicates where the end of the last claimed region
3905       is. A local finger indicates how far into the region a task has
3906       scanned. The two fingers are used to determine how to gray an
3907       object (i.e. whether simply marking it is OK, as it will be
3908       visited by a task in the future, or whether it needs to be also
3909       pushed on a stack).
3910 
3911       (2) Local Queue. The local queue of the task which is accessed
3912       reasonably efficiently by the task. Other tasks can steal from
3913       it when they run out of work. Throughout the marking phase, a
3914       task attempts to keep its local queue short but not totally
3915       empty, so that entries are available for stealing by other
3916       tasks. Only when there is no more work will a task totally
3917       drain its local queue.
3918 
3919       (3) Global Mark Stack. This handles local queue overflow. During
3920       marking, only sets of entries are moved between it and the local
3921       queues, as access to it requires a mutex and more fine-grained
3922       interaction with it might cause contention. If it
3923       overflows, then the marking phase should restart and iterate
3924       over the bitmap to identify gray objects. Throughout the marking
3925       phase, tasks attempt to keep the global mark stack at a small
3926       length but not totally empty, so that entries are available for
3927       popping by other tasks. Only when there is no more work will
3928       tasks totally drain the global mark stack.
3929 
3930       (4) SATB Buffer Queue. This is where completed SATB buffers are
3931       made available. Buffers are regularly removed from this queue
3932       and scanned for roots, so that the queue doesn't get too
3933       long. During remark, all completed buffers are processed, as
3934       well as the filled in parts of any uncompleted buffers.
3935 
3936     The do_marking_step() method tries to abort when the time target
3937     has been reached. There are a few other cases when the
3938     do_marking_step() method also aborts:
3939 
3940       (1) When the marking phase has been aborted (after a Full GC).
3941 
3942       (2) When a global overflow (on the global stack) has been
3943       triggered. Before the task aborts, it will actually sync up with
3944       the other tasks to ensure that all the marking data structures
3945       (local queues, stacks, fingers etc.)  are re-initialised so that
3946       when do_marking_step() completes, the marking phase can
3947       immediately restart.
3948 
3949       (3) When enough completed SATB buffers are available. The
3950       do_marking_step() method only tries to drain SATB buffers right
3951       at the beginning. So, if enough buffers are available, the
3952       marking step aborts and the SATB buffers are processed at
3953       the beginning of the next invocation.
3954 
3955       (4) To yield. When we have to yield, we abort and yield
3956       right at the end of do_marking_step(). This saves us from a lot
3957       of hassle as, by yielding, we might allow a Full GC. If this
3958       happens then objects will be compacted underneath our feet, the
3959       heap might shrink, etc. We save checking for this by just
3960       aborting and doing the yield right at the end.
3961 
3962     From the above it follows that the do_marking_step() method should
3963     be called in a loop (or, otherwise, regularly) until it completes.
3964 
3965     If a marking step completes without its has_aborted() flag being
3966     true, it means it has completed the current marking phase (and
3967     also all other marking tasks have done so and have all synced up).
3968 
3969     A method called regular_clock_call() is invoked "regularly" (at
3970     sub-ms intervals) throughout marking. It is this clock method that
3971     checks all the abort conditions which were mentioned above and
3972     decides when the task should abort. A work-based scheme is used to
3973     trigger this clock method: when the number of object words the
3974     marking phase has scanned or the number of references the marking
3975     phase has visited reaches a given limit. Additional invocations of
3976     the clock method have been planted in a few other strategic places
3977     too. The initial reason for the clock method was to avoid calling
3978     vtime too regularly, as it is quite expensive. So, once it was in
3979     place, it was natural to piggy-back all the other conditions on it
3980     too and not constantly check them throughout the code.
3981 
3982  *****************************************************************************/
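
     // A minimal driver sketch of the loop described above (hypothetical;
     // the actual callers are the concurrent marking and remark tasks):
     //
     //   do {
     //     task->do_marking_step(target_ms,
     //                           true /* do_stealing */,
     //                           true /* do_termination */);
     //     if (task->has_aborted()) {
     //       // yield / handle overflow as required, then retry
     //     }
     //   } while (task->has_aborted());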
3983 
3984 void CMTask::do_marking_step(double time_target_ms,
3985                              bool do_stealing,
3986                              bool do_termination) {
3987   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
3988   assert(concurrent() == _cm->concurrent(), "they should be the same");
3989 
3990   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
3991   assert(_task_queues != NULL, "invariant");
3992   assert(_task_queue != NULL, "invariant");
3993   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
3994 
3995   assert(!_claimed,
3996          "only one thread should claim this task at any one time");
3997 
3998   // OK, this doesn't safeguard against all possible scenarios, as it is
3999   // possible for two threads to set the _claimed flag at the same
4000   // time. But it is only for debugging purposes anyway and it will
4001   // catch most problems.
4002   _claimed = true;
4003 
4004   _start_time_ms = os::elapsedVTime() * 1000.0;
4005   statsOnly( _interval_start_time_ms = _start_time_ms );
4006 
4007   double diff_prediction_ms =
4008     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4009   _time_target_ms = time_target_ms - diff_prediction_ms;
4010 
4011   // set up the variables that are used in the work-based scheme to
4012   // call the regular clock method
4013   _words_scanned = 0;
4014   _refs_reached  = 0;
4015   recalculate_limits();
4016 
4017   // clear all flags
4018   clear_has_aborted();
4019   _has_timed_out = false;
4020   _draining_satb_buffers = false;
4021 
4022   ++_calls;
4023 
4024   if (_cm->verbose_low()) {
4025     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4026                            "target = %1.2lfms >>>>>>>>>>",
4027                            _worker_id, _calls, _time_target_ms);
4028   }
4029 
4030   // Set up the bitmap and oop closures. Anything that uses them is
4031   // eventually called from this method, so it is OK to allocate them
4032   // here on the stack.
4033   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4034   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
4035   set_cm_oop_closure(&cm_oop_closure);
4036 
4037   if (_cm->has_overflown()) {
4038     // This can happen if the mark stack overflows during a GC pause
4039     // and this task, after a yield point, restarts. We have to abort
4040     // as we need to get into the overflow protocol which happens
4041     // right at the end of this task.
4042     set_has_aborted();
4043   }
4044 
4045   // First drain any available SATB buffers. After this, we will not
4046   // look at SATB buffers before the next invocation of this method.
4047   // If enough completed SATB buffers are queued up, the regular clock
4048   // will abort this task so that it restarts.
4049   drain_satb_buffers();
4050   // ...then partially drain the local queue and the global stack
4051   drain_local_queue(true);
4052   drain_global_stack(true);
4053 
4054   do {
4055     if (!has_aborted() && _curr_region != NULL) {
4056       // This means that we're already holding on to a region.
4057       assert(_finger != NULL, "if region is not NULL, then the finger "
4058              "should not be NULL either");
4059 
4060       // We might have restarted this task after an evacuation pause
4061       // which might have evacuated the region we're holding on to
4062       // underneath our feet. Let's read its limit again to make sure
4063       // that we do not iterate over a region of the heap that
4064       // contains garbage (update_region_limit() will also move
4065       // _finger to the start of the region if it is found empty).
4066       update_region_limit();
4067       // We will start from _finger not from the start of the region,
4068       // as we might be restarting this task after aborting half-way
4069       // through scanning this region. In this case, _finger points to
4070       // the address where we last found a marked object. If this is a
4071       // fresh region, _finger points to start().
4072       MemRegion mr = MemRegion(_finger, _region_limit);
4073 
4074       if (_cm->verbose_low()) {
4075         gclog_or_tty->print_cr("[%u] we're scanning part "
4076                                "["PTR_FORMAT", "PTR_FORMAT") "
4077                                "of region "PTR_FORMAT,
4078                                _worker_id, _finger, _region_limit, _curr_region);
4079       }
4080 
4081       // Let's iterate over the bitmap of the part of the
4082       // region that is left.
4083       if (mr.is_empty() || _nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4084         // We successfully completed iterating over the region. Now,
4085         // let's give up the region.
4086         giveup_current_region();
4087         regular_clock_call();
4088       } else {
4089         assert(has_aborted(), "currently the only way to do so");
4090         // The only way to abort the bitmap iteration is to return
4091         // false from the do_bit() method. However, inside the
4092         // do_bit() method we move the _finger to point to the
4093         // object currently being looked at. So, if we bail out, we
4094         // have definitely set _finger to something non-null.
4095         assert(_finger != NULL, "invariant");
4096 
4097         // Region iteration was actually aborted. So now _finger
4098         // points to the address of the object we last scanned. If we
4099         // leave it there, when we restart this task, we will rescan
4100         // the object. It is easy to avoid this. We move the finger by
4101         // enough to point to the next possible object header (the
4102         // bitmap knows by how much we need to move it as it knows its
4103         // granularity).
4104         assert(_finger < _region_limit, "invariant");
4105         HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger);
4106         // Check if bitmap iteration was aborted while scanning the last object
4107         if (new_finger >= _region_limit) {
4108           giveup_current_region();
4109         } else {
4110           move_finger_to(new_finger);
4111         }
4112       }
4113     }
4114     // At this point we have either completed iterating over the
4115     // region we were holding on to, or we have aborted.
4116 
4117     // We then partially drain the local queue and the global stack.
4118     // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method about why it might
    // return NULL with potentially more regions available for
    // claiming, and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region  == NULL, "invariant");
      assert(_finger       == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
      }
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        statsOnly( ++_regions_claimed );

        if (_cm->verbose_low()) {
          gclog_or_tty->print_cr("[%u] we successfully claimed "
                                 "region "PTR_FORMAT,
                                 _worker_id, claimed_region);
        }

        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once around the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while ( _curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
    }

    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt to steal work from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
    }

    while (!has_aborted()) {
      oop obj;
      statsOnly( ++_steal_attempts );

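      // try_stealing() picks a victim queue pseudo-randomly (seeded
      // by _hash_seed) and pops an entry from it if one is available.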
      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        if (_cm->verbose_medium()) {
          gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
                                 _worker_id, (void*) obj);
        }

        statsOnly( ++_steals );

        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // If we are about to wrap up and go into termination, check if we
  // should raise the overflow flag.
  if (do_termination && !has_aborted()) {
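    // should_force() is a stress-testing hook: when enabled it makes
    // the task pretend the global mark stack overflowed, so the
    // restart-after-overflow path below gets exercised.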
    if (_cm->force_overflow()->should_force()) {
      _cm->set_has_overflown();
      regular_clock_call();
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
    }

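    // os::elapsedVTime() returns this thread's CPU (virtual) time in
    // seconds; multiplying by 1000.0 converts it to milliseconds.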
    _termination_start_time_ms = os::elapsedVTime() * 1000.0;
    // The CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
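    // offer_termination() only returns true once every worker has
    // offered termination; it returns false early if
    // should_exit_termination() indicates that more work has shown up.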
    bool finished = _cm->terminator()->offer_termination(this);
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
      }
    } else {
      // Apparently there's more work to do. Let's abort this task. The
      // caller will restart it, and we can hopefully find more things to do.

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] apparently there is more work to do",
                               _worker_id);
      }

      set_has_aborted();
      statsOnly( ++_aborted_termination );
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.

    statsOnly( ++_aborted );

    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialise in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
      }

      _cm->enter_first_sync_barrier(_worker_id);
      // When we exit this sync barrier we know that all tasks have
      // stopped doing marking work. So, it's now safe to
      // re-initialise our data structures. At the end of this method,
      // task 0 will clear the global data structures.

      statsOnly( ++_aborted_overflow );

      // We clear the local state of this task...
      clear_region_fields();

      // ...and enter the second barrier.
      _cm->enter_second_sync_barrier(_worker_id);
      // At this point everything has been re-initialised and we're
      // ready to restart.
    }

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
                             "elapsed = %1.2lfms <<<<<<<<<<",
                             _worker_id, _time_target_ms, elapsed_time_ms);
      if (_cm->has_aborted()) {
        gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
                               _worker_id);
      }
    }
  } else {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
                             "elapsed = %1.2lfms <<<<<<<<<<",
                             _worker_id, _time_target_ms, elapsed_time_ms);
    }
  }

  _claimed = false;
}

CMTask::CMTask(uint worker_id,
               ConcurrentMark* cm,
               size_t* marked_bytes,
               BitMap* card_bm,
               CMTaskQueue* task_queue,
               CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL),
    _marked_bytes_array(marked_bytes),
    _card_bm(card_bm) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

  statsOnly( _clock_due_to_scanning = 0;
             _clock_due_to_marking  = 0 );

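  // Seed the step-overshoot statistics with a nominal 0.5 ms so the
  // very first prediction has a non-zero, mildly conservative sample.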
  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX            "###"

#define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT            "   %-4s"
#define G1PPRL_TYPE_H_FORMAT          "   %4s"
#define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT          "  %9s"
#define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT        "  %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
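
// For illustration only (the values below are made up): a per-region
// line composed from these macros comes out roughly as
//   ###  OLD  0x00000000f0000000-0x00000000f0400000  4194304 ...
// and a summary tag like G1PPRL_SUM_MB_PERC_FORMAT("used") expands to
// something like "  used: 12.50 MB / 48.83 %".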

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
  : _out(out),
    _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _hum_used_bytes(0), _hum_capacity_bytes(0),
    _hum_prev_live_bytes(0), _hum_next_live_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_committed = g1h->g1_committed();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  _out->cr();
  _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
                 G1PPRL_SUM_ADDR_FORMAT("committed")
                 G1PPRL_SUM_ADDR_FORMAT("reserved")
                 G1PPRL_SUM_BYTE_FORMAT("region-size"),
                 g1_committed.start(), g1_committed.end(),
                 g1_reserved.start(), g1_reserved.end(),
                 HeapRegion::GrainBytes);
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT,
                 "type", "address-range",
                 "used", "prev-live", "next-live", "gc-eff");
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT,
                 "", "",
                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
}
// Takes a reference to one of the _hum_* fields, deduces the
// corresponding value for a region in a humongous region series
// (either the region size, or what's left of the _hum_* field if it
// is < the region size), and updates the _hum_* field accordingly.
size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
  size_t bytes = 0;
  // The > 0 check is to deal with the prev and next live bytes which
  // could be 0.
  if (*hum_bytes > 0) {
    bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
    *hum_bytes -= bytes;
  }
  return bytes;
}
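// For example (with an illustrative 1 MB region size): if *hum_bytes
// starts at 2.5 MB, successive calls return 1 MB, 1 MB and 0.5 MB,
// leaving *hum_bytes at 0, after which every call returns 0.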

// It deduces the values for a region in a humongous region series
// from the _hum_* fields and updates those accordingly. It assumes
// that the _hum_* fields have already been set up from the "starts
// humongous" region and that we visit the regions in address order.
void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
                                                     size_t* capacity_bytes,
                                                     size_t* prev_live_bytes,
                                                     size_t* next_live_bytes) {
  assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
  *used_bytes      = get_hum_bytes(&_hum_used_bytes);
  *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
  *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
  *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type = "";
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  if (r->used() == 0) {
    type = "FREE";
  } else if (r->is_survivor()) {
    type = "SURV";
  } else if (r->is_young()) {
    type = "EDEN";
  } else if (r->startsHumongous()) {
    type = "HUMS";

    assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
           _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
           "they should have been zeroed after the last time we used them");
    // Set up the _hum_* fields.
    _hum_capacity_bytes  = capacity_bytes;
    _hum_used_bytes      = used_bytes;
    _hum_prev_live_bytes = prev_live_bytes;
    _hum_next_live_bytes = next_live_bytes;
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
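    // A "starts humongous" region's end() covers the entire humongous
    // series, so clamp the printed range to a single region; the
    // trailing "continues humongous" regions get their own lines.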
    end = bottom + HeapRegion::GrainWords;
  } else if (r->continuesHumongous()) {
    type = "HUMC";
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    assert(end == bottom + HeapRegion::GrainWords, "invariant");
  } else {
    type = "OLD";
  }

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;

  // Print a line for this particular region.
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_FORMAT
                 G1PPRL_ADDR_BASE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_DOUBLE_FORMAT,
                 type, bottom, end,
                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff);

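  // Returning false tells the region iterator that we are not done,
  // so it continues on to the next region.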
  return false;
}

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // Print the footer of the output.
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 " SUMMARY"
                 G1PPRL_SUM_MB_FORMAT("capacity")
                 G1PPRL_SUM_MB_PERC_FORMAT("used")
                 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                 G1PPRL_SUM_MB_PERC_FORMAT("next-live"),
                 bytes_to_mb(_total_capacity_bytes),
                 bytes_to_mb(_total_used_bytes),
                 perc(_total_used_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_prev_live_bytes),
                 perc(_total_prev_live_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_next_live_bytes),
                 perc(_total_next_live_bytes, _total_capacity_bytes));
  _out->cr();
}