1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_implementation/g1/concurrentMark.inline.hpp"
  30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  32 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  33 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  34 #include "gc_implementation/g1/g1Log.hpp"
  35 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  36 #include "gc_implementation/g1/g1RemSet.hpp"
  37 #include "gc_implementation/g1/heapRegion.inline.hpp"
  38 #include "gc_implementation/g1/heapRegionManager.inline.hpp"
  39 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  40 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  41 #include "gc_implementation/shared/vmGCOperations.hpp"
  42 #include "gc_implementation/shared/gcTimer.hpp"
  43 #include "gc_implementation/shared/gcTrace.hpp"
  44 #include "gc_implementation/shared/gcTraceTime.hpp"
  45 #include "memory/allocation.hpp"
  46 #include "memory/genOopClosures.inline.hpp"
  47 #include "memory/referencePolicy.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "oops/oop.inline.hpp"
  50 #include "runtime/handles.inline.hpp"
  51 #include "runtime/java.hpp"
  52 #include "runtime/atomic.inline.hpp"
  53 #include "runtime/prefetch.inline.hpp"
  54 #include "services/memTracker.hpp"
  55 
  56 // Concurrent marking bit map wrapper
  57 
  58 CMBitMapRO::CMBitMapRO(int shifter) :
  59   _bm(),
  60   _shifter(shifter) {
  61   _bmStartWord = 0;
  62   _bmWordSize = 0;
  63 }
  64 
  65 HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  66                                                const HeapWord* limit) const {
  67   // First we must round addr *up* to a possible object boundary.
  68   addr = (HeapWord*)align_size_up((intptr_t)addr,
  69                                   HeapWordSize << _shifter);
  70   size_t addrOffset = heapWordToOffset(addr);
  71   if (limit == NULL) {
  72     limit = _bmStartWord + _bmWordSize;
  73   }
  74   size_t limitOffset = heapWordToOffset(limit);
  75   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  76   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  77   assert(nextAddr >= addr, "get_next_one postcondition");
  78   assert(nextAddr == limit || isMarked(nextAddr),
  79          "get_next_one postcondition");
  80   return nextAddr;
  81 }
  82 
  83 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
  84                                                  const HeapWord* limit) const {
  85   size_t addrOffset = heapWordToOffset(addr);
  86   if (limit == NULL) {
  87     limit = _bmStartWord + _bmWordSize;
  88   }
  89   size_t limitOffset = heapWordToOffset(limit);
  90   size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  91   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  92   assert(nextAddr >= addr, "get_next_one postcondition");
  93   assert(nextAddr == limit || !isMarked(nextAddr),
  94          "get_next_one postcondition");
  95   return nextAddr;
  96 }
  97 
  98 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  99   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
 100   return (int) (diff >> _shifter);
 101 }
 102 
 103 #ifndef PRODUCT
 104 bool CMBitMapRO::covers(MemRegion heap_rs) const {
 105   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
 106   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
 107          "size inconsistency");
 108   return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
 109          _bmWordSize  == heap_rs.word_size();
 110 }
 111 #endif
 112 
 113 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
 114   _bm.print_on_error(st, prefix);
 115 }
 116 
 117 size_t CMBitMap::compute_size(size_t heap_size) {
 118   return heap_size / mark_distance();
 119 }
 120 
 121 size_t CMBitMap::mark_distance() {
 122   return MinObjAlignmentInBytes * BitsPerByte;
 123 }
 124 
 125 void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
 126   _bmStartWord = heap.start();
 127   _bmWordSize = heap.word_size();
 128 
 129   _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
 130   _bm.set_size(_bmWordSize >> _shifter);
 131 
 132   storage->set_mapping_changed_listener(&_listener);
 133 }
 134 
 135 void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
 136   if (zero_filled) {
 137     return;
 138   }
 139   // We need to clear the bitmap on commit, removing any existing information.
 140   MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
 141   _bm->clearRange(mr);
 142 }
 143 
 144 // Closure used for clearing the given mark bitmap.
 145 class ClearBitmapHRClosure : public HeapRegionClosure {
 146  private:
 147   ConcurrentMark* _cm;
 148   CMBitMap* _bitmap;
 149   bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 150  public:
 151   ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
 152     assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
 153   }
 154 
 155   virtual bool doHeapRegion(HeapRegion* r) {
 156     size_t const chunk_size_in_words = M / HeapWordSize;
 157 
 158     HeapWord* cur = r->bottom();
 159     HeapWord* const end = r->end();
 160 
 161     while (cur < end) {
 162       MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 163       _bitmap->clearRange(mr);
 164 
 165       cur += chunk_size_in_words;
 166 
 167       // Abort iteration if after yielding the marking has been aborted.
 168       if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
 169         return true;
 170       }
 171       // Repeat the asserts from before the start of the closure. We will do them
 172       // as asserts here to minimize their overhead on the product. However, we
 173       // will have them as guarantees at the beginning / end of the bitmap
 174       // clearing to get some checking in the product.
 175       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
 176       assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
 177     }
 178 
 179     return false;
 180   }
 181 };
 182 
 183 class ParClearNextMarkBitmapTask : public AbstractGangTask {
 184   ClearBitmapHRClosure* _cl;
 185   HeapRegionClaimer     _hrclaimer;
 186   bool                  _suspendible; // If the task is suspendible, workers must join the STS.
 187 
 188 public:
 189   ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
 190       _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}
 191 
 192   void work(uint worker_id) {
 193     if (_suspendible) {
 194       SuspendibleThreadSet::join();
 195     }
 196     G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
 197     if (_suspendible) {
 198       SuspendibleThreadSet::leave();
 199     }
 200   }
 201 };
 202 
 203 void CMBitMap::clearAll() {
 204   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 205   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
 206   uint n_workers = g1h->workers()->active_workers();
 207   ParClearNextMarkBitmapTask task(&cl, n_workers, false);
 208   g1h->workers()->run_task(&task);
 209   guarantee(cl.complete(), "Must have completed iteration.");
 210   return;
 211 }
 212 
 213 void CMBitMap::markRange(MemRegion mr) {
 214   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 215   assert(!mr.is_empty(), "unexpected empty region");
 216   assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 217           ((HeapWord *) mr.end())),
 218          "markRange memory region end is not card aligned");
 219   // convert address range into offset range
 220   _bm.at_put_range(heapWordToOffset(mr.start()),
 221                    heapWordToOffset(mr.end()), true);
 222 }
 223 
 224 void CMBitMap::clearRange(MemRegion mr) {
 225   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 226   assert(!mr.is_empty(), "unexpected empty region");
 227   // convert address range into offset range
 228   _bm.at_put_range(heapWordToOffset(mr.start()),
 229                    heapWordToOffset(mr.end()), false);
 230 }
 231 
 232 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
 233                                             HeapWord* end_addr) {
 234   HeapWord* start = getNextMarkedWordAddress(addr);
 235   start = MIN2(start, end_addr);
 236   HeapWord* end   = getNextUnmarkedWordAddress(start);
 237   end = MIN2(end, end_addr);
 238   assert(start <= end, "Consistency check");
 239   MemRegion mr(start, end);
 240   if (!mr.is_empty()) {
 241     clearRange(mr);
 242   }
 243   return mr;
 244 }
 245 
 246 CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
 247   _base(NULL), _cm(cm)
 248 #ifdef ASSERT
 249   , _drain_in_progress(false)
 250   , _drain_in_progress_yields(false)
 251 #endif
 252 {}
 253 
 254 bool CMMarkStack::allocate(size_t capacity) {
 255   // allocate a stack of the requisite depth
 256   ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
 257   if (!rs.is_reserved()) {
 258     warning("ConcurrentMark MarkStack allocation failure");
 259     return false;
 260   }
 261   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 262   if (!_virtual_space.initialize(rs, rs.size())) {
 263     warning("ConcurrentMark MarkStack backing store failure");
 264     // Release the virtual memory reserved for the marking stack
 265     rs.release();
 266     return false;
 267   }
 268   assert(_virtual_space.committed_size() == rs.size(),
 269          "Didn't reserve backing store for all of ConcurrentMark stack?");
 270   _base = (oop*) _virtual_space.low();
 271   setEmpty();
 272   _capacity = (jint) capacity;
 273   _saved_index = -1;
 274   _should_expand = false;
 275   NOT_PRODUCT(_max_depth = 0);
 276   return true;
 277 }
 278 
 279 void CMMarkStack::expand() {
 280   // Called, during remark, if we've overflown the marking stack during marking.
 281   assert(isEmpty(), "stack should been emptied while handling overflow");
 282   assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
 283   // Clear expansion flag
 284   _should_expand = false;
 285   if (_capacity == (jint) MarkStackSizeMax) {
 286     if (PrintGCDetails && Verbose) {
 287       gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
 288     }
 289     return;
 290   }
 291   // Double capacity if possible
 292   jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
 293   // Do not give up existing stack until we have managed to
 294   // get the double capacity that we desired.
 295   ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
 296                                                            sizeof(oop)));
 297   if (rs.is_reserved()) {
 298     // Release the backing store associated with old stack
 299     _virtual_space.release();
 300     // Reinitialize virtual space for new stack
 301     if (!_virtual_space.initialize(rs, rs.size())) {
 302       fatal("Not enough swap for expanded marking stack capacity");
 303     }
 304     _base = (oop*)(_virtual_space.low());
 305     _index = 0;
 306     _capacity = new_capacity;
 307   } else {
 308     if (PrintGCDetails && Verbose) {
 309       // Failed to double capacity, continue;
 310       gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
 311                           SIZE_FORMAT"K to " SIZE_FORMAT"K",
 312                           _capacity / K, new_capacity / K);
 313     }
 314   }
 315 }
 316 
 317 void CMMarkStack::set_should_expand() {
 318   // If we're resetting the marking state because of an
 319   // marking stack overflow, record that we should, if
 320   // possible, expand the stack.
 321   _should_expand = _cm->has_overflown();
 322 }
 323 
 324 CMMarkStack::~CMMarkStack() {
 325   if (_base != NULL) {
 326     _base = NULL;
 327     _virtual_space.release();
 328   }
 329 }
 330 
 331 void CMMarkStack::par_push(oop ptr) {
 332   while (true) {
 333     if (isFull()) {
 334       _overflow = true;
 335       return;
 336     }
 337     // Otherwise...
 338     jint index = _index;
 339     jint next_index = index+1;
 340     jint res = Atomic::cmpxchg(next_index, &_index, index);
 341     if (res == index) {
 342       _base[index] = ptr;
 343       // Note that we don't maintain this atomically.  We could, but it
 344       // doesn't seem necessary.
 345       NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 346       return;
 347     }
 348     // Otherwise, we need to try again.
 349   }
 350 }
 351 
 352 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
 353   while (true) {
 354     if (isFull()) {
 355       _overflow = true;
 356       return;
 357     }
 358     // Otherwise...
 359     jint index = _index;
 360     jint next_index = index + n;
 361     if (next_index > _capacity) {
 362       _overflow = true;
 363       return;
 364     }
 365     jint res = Atomic::cmpxchg(next_index, &_index, index);
 366     if (res == index) {
 367       for (int i = 0; i < n; i++) {
 368         int  ind = index + i;
 369         assert(ind < _capacity, "By overflow test above.");
 370         _base[ind] = ptr_arr[i];
 371       }
 372       NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 373       return;
 374     }
 375     // Otherwise, we need to try again.
 376   }
 377 }
 378 
 379 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 380   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 381   jint start = _index;
 382   jint next_index = start + n;
 383   if (next_index > _capacity) {
 384     _overflow = true;
 385     return;
 386   }
 387   // Otherwise.
 388   _index = next_index;
 389   for (int i = 0; i < n; i++) {
 390     int ind = start + i;
 391     assert(ind < _capacity, "By overflow test above.");
 392     _base[ind] = ptr_arr[i];
 393   }
 394   NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
 395 }
 396 
 397 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
 398   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 399   jint index = _index;
 400   if (index == 0) {
 401     *n = 0;
 402     return false;
 403   } else {
 404     int k = MIN2(max, index);
 405     jint  new_ind = index - k;
 406     for (int j = 0; j < k; j++) {
 407       ptr_arr[j] = _base[new_ind + j];
 408     }
 409     _index = new_ind;
 410     *n = k;
 411     return true;
 412   }
 413 }
 414 
 415 template<class OopClosureClass>
 416 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
 417   assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
 418          || SafepointSynchronize::is_at_safepoint(),
 419          "Drain recursion must be yield-safe.");
 420   bool res = true;
 421   debug_only(_drain_in_progress = true);
 422   debug_only(_drain_in_progress_yields = yield_after);
 423   while (!isEmpty()) {
 424     oop newOop = pop();
 425     assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
 426     assert(newOop->is_oop(), "Expected an oop");
 427     assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
 428            "only grey objects on this stack");
 429     newOop->oop_iterate(cl);
 430     if (yield_after && _cm->do_yield_check()) {
 431       res = false;
 432       break;
 433     }
 434   }
 435   debug_only(_drain_in_progress = false);
 436   return res;
 437 }
 438 
 439 void CMMarkStack::note_start_of_gc() {
 440   assert(_saved_index == -1,
 441          "note_start_of_gc()/end_of_gc() bracketed incorrectly");
 442   _saved_index = _index;
 443 }
 444 
 445 void CMMarkStack::note_end_of_gc() {
 446   // This is intentionally a guarantee, instead of an assert. If we
 447   // accidentally add something to the mark stack during GC, it
 448   // will be a correctness issue so it's better if we crash. we'll
 449   // only check this once per GC anyway, so it won't be a performance
 450   // issue in any way.
 451   guarantee(_saved_index == _index,
 452             err_msg("saved index: %d index: %d", _saved_index, _index));
 453   _saved_index = -1;
 454 }
 455 
 456 void CMMarkStack::oops_do(OopClosure* f) {
 457   assert(_saved_index == _index,
 458          err_msg("saved index: %d index: %d", _saved_index, _index));
 459   for (int i = 0; i < _index; i += 1) {
 460     f->do_oop(&_base[i]);
 461   }
 462 }
 463 
 464 CMRootRegions::CMRootRegions() :
 465   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
 466   _should_abort(false),  _next_survivor(NULL) { }
 467 
 468 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
 469   _young_list = g1h->young_list();
 470   _cm = cm;
 471 }
 472 
 473 void CMRootRegions::prepare_for_scan() {
 474   assert(!scan_in_progress(), "pre-condition");
 475 
 476   // Currently, only survivors can be root regions.
 477   assert(_next_survivor == NULL, "pre-condition");
 478   _next_survivor = _young_list->first_survivor_region();
 479   _scan_in_progress = (_next_survivor != NULL);
 480   _should_abort = false;
 481 }
 482 
 483 HeapRegion* CMRootRegions::claim_next() {
 484   if (_should_abort) {
 485     // If someone has set the should_abort flag, we return NULL to
 486     // force the caller to bail out of their loop.
 487     return NULL;
 488   }
 489 
 490   // Currently, only survivors can be root regions.
 491   HeapRegion* res = _next_survivor;
 492   if (res != NULL) {
 493     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 494     // Read it again in case it changed while we were waiting for the lock.
 495     res = _next_survivor;
 496     if (res != NULL) {
 497       if (res == _young_list->last_survivor_region()) {
 498         // We just claimed the last survivor so store NULL to indicate
 499         // that we're done.
 500         _next_survivor = NULL;
 501       } else {
 502         _next_survivor = res->get_next_young_region();
 503       }
 504     } else {
 505       // Someone else claimed the last survivor while we were trying
 506       // to take the lock so nothing else to do.
 507     }
 508   }
 509   assert(res == NULL || res->is_survivor(), "post-condition");
 510 
 511   return res;
 512 }
 513 
 514 void CMRootRegions::scan_finished() {
 515   assert(scan_in_progress(), "pre-condition");
 516 
 517   // Currently, only survivors can be root regions.
 518   if (!_should_abort) {
 519     assert(_next_survivor == NULL, "we should have claimed all survivors");
 520   }
 521   _next_survivor = NULL;
 522 
 523   {
 524     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 525     _scan_in_progress = false;
 526     RootRegionScan_lock->notify_all();
 527   }
 528 }
 529 
 530 bool CMRootRegions::wait_until_scan_finished() {
 531   if (!scan_in_progress()) return false;
 532 
 533   {
 534     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 535     while (scan_in_progress()) {
 536       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 537     }
 538   }
 539   return true;
 540 }
 541 
 542 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 543 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 544 #endif // _MSC_VER
 545 
 546 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 547   return MAX2((n_par_threads + 2) / 4, 1U);
 548 }
 549 
 550 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
 551   _g1h(g1h),
 552   _markBitMap1(),
 553   _markBitMap2(),
 554   _parallel_marking_threads(0),
 555   _max_parallel_marking_threads(0),
 556   _sleep_factor(0.0),
 557   _marking_task_overhead(1.0),
 558   _cleanup_sleep_factor(0.0),
 559   _cleanup_task_overhead(1.0),
 560   _cleanup_list("Cleanup List"),
 561   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
 562   _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
 563             CardTableModRefBS::card_shift,
 564             false /* in_resource_area*/),
 565 
 566   _prevMarkBitMap(&_markBitMap1),
 567   _nextMarkBitMap(&_markBitMap2),
 568 
 569   _markStack(this),
 570   // _finger set in set_non_marking_state
 571 
 572   _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
 573   // _active_tasks set in set_non_marking_state
 574   // _tasks set inside the constructor
 575   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
 576   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 577 
 578   _has_overflown(false),
 579   _concurrent(false),
 580   _has_aborted(false),
 581   _aborted_gc_id(GCId::undefined()),
 582   _restart_for_overflow(false),
 583   _concurrent_marking_in_progress(false),
 584 
 585   // _verbose_level set below
 586 
 587   _init_times(),
 588   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 589   _cleanup_times(),
 590   _total_counting_time(0.0),
 591   _total_rs_scrub_time(0.0),
 592 
 593   _parallel_workers(NULL),
 594 
 595   _count_card_bitmaps(NULL),
 596   _count_marked_bytes(NULL),
 597   _completed_initialization(false) {
 598   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 599   if (verbose_level < no_verbose) {
 600     verbose_level = no_verbose;
 601   }
 602   if (verbose_level > high_verbose) {
 603     verbose_level = high_verbose;
 604   }
 605   _verbose_level = verbose_level;
 606 
 607   if (verbose_low()) {
 608     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
 609                            "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
 610   }
 611 
 612   _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 613   _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 614 
 615   // Create & start a ConcurrentMark thread.
 616   _cmThread = new ConcurrentMarkThread(this);
 617   assert(cmThread() != NULL, "CM Thread should have been created");
 618   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
 619   if (_cmThread->osthread() == NULL) {
 620       vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 621   }
 622 
 623   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 624   assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
 625   assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
 626 
 627   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 628   satb_qs.set_buffer_size(G1SATBBufferSize);
 629 
 630   _root_regions.init(_g1h, this);
 631 
 632   if (ConcGCThreads > ParallelGCThreads) {
 633     warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
 634             "than ParallelGCThreads (" UINTX_FORMAT ").",
 635             ConcGCThreads, ParallelGCThreads);
 636     return;
 637   }
 638   if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
 639     // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
 640     // if both are set
 641     _sleep_factor             = 0.0;
 642     _marking_task_overhead    = 1.0;
 643   } else if (G1MarkingOverheadPercent > 0) {
 644     // We will calculate the number of parallel marking threads based
 645     // on a target overhead with respect to the soft real-time goal
 646     double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
 647     double overall_cm_overhead =
 648       (double) MaxGCPauseMillis * marking_overhead /
 649       (double) GCPauseIntervalMillis;
 650     double cpu_ratio = 1.0 / (double) os::processor_count();
 651     double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
 652     double marking_task_overhead =
 653       overall_cm_overhead / marking_thread_num *
 654                                               (double) os::processor_count();
 655     double sleep_factor =
 656                        (1.0 - marking_task_overhead) / marking_task_overhead;
 657 
 658     FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
 659     _sleep_factor             = sleep_factor;
 660     _marking_task_overhead    = marking_task_overhead;
 661   } else {
 662     // Calculate the number of parallel marking threads by scaling
 663     // the number of parallel GC threads.
 664     uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
 665     FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
 666     _sleep_factor             = 0.0;
 667     _marking_task_overhead    = 1.0;
 668   }
 669 
 670   assert(ConcGCThreads > 0, "Should have been set");
 671   _parallel_marking_threads = (uint) ConcGCThreads;
 672   _max_parallel_marking_threads = _parallel_marking_threads;
 673 
 674   if (parallel_marking_threads() > 1) {
 675     _cleanup_task_overhead = 1.0;
 676   } else {
 677     _cleanup_task_overhead = marking_task_overhead();
 678   }
 679   _cleanup_sleep_factor =
 680                    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
 681 
 682 #if 0
 683   gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
 684   gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
 685   gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
 686   gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
 687   gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
 688 #endif
 689 
 690   _parallel_workers = new FlexibleWorkGang("G1 Marker",
 691        _max_parallel_marking_threads, false, true);
 692   if (_parallel_workers == NULL) {
 693     vm_exit_during_initialization("Failed necessary allocation.");
 694   } else {
 695     _parallel_workers->initialize_workers();
 696   }
 697 
 698   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 699     uintx mark_stack_size =
 700       MIN2(MarkStackSizeMax,
 701           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
 702     // Verify that the calculated value for MarkStackSize is in range.
 703     // It would be nice to use the private utility routine from Arguments.
 704     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 705       warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
 706               "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
 707               mark_stack_size, (uintx) 1, MarkStackSizeMax);
 708       return;
 709     }
 710     FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
 711   } else {
 712     // Verify MarkStackSize is in range.
 713     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 714       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 715         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 716           warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
 717                   "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
 718                   MarkStackSize, (uintx) 1, MarkStackSizeMax);
 719           return;
 720         }
 721       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 722         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 723           warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
 724                   " or for MarkStackSizeMax (" UINTX_FORMAT ")",
 725                   MarkStackSize, MarkStackSizeMax);
 726           return;
 727         }
 728       }
 729     }
 730   }
 731 
 732   if (!_markStack.allocate(MarkStackSize)) {
 733     warning("Failed to allocate CM marking stack");
 734     return;
 735   }
 736 
 737   _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
 738   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
 739 
 740   _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
 741   _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
 742 
 743   BitMap::idx_t card_bm_size = _card_bm.size();
 744 
 745   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 746   _active_tasks = _max_worker_id;
 747 
 748   size_t max_regions = (size_t) _g1h->max_regions();
 749   for (uint i = 0; i < _max_worker_id; ++i) {
 750     CMTaskQueue* task_queue = new CMTaskQueue();
 751     task_queue->initialize();
 752     _task_queues->register_queue(i, task_queue);
 753 
 754     _count_card_bitmaps[i] = BitMap(card_bm_size, false);
 755     _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
 756 
 757     _tasks[i] = new CMTask(i, this,
 758                            _count_marked_bytes[i],
 759                            &_count_card_bitmaps[i],
 760                            task_queue, _task_queues);
 761 
 762     _accum_task_vtime[i] = 0.0;
 763   }
 764 
 765   // Calculate the card number for the bottom of the heap. Used
 766   // in biasing indexes into the accounting card bitmaps.
 767   _heap_bottom_card_num =
 768     intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
 769                                 CardTableModRefBS::card_shift);
 770 
 771   // Clear all the liveness counting data
 772   clear_all_count_data();
 773 
 774   // so that the call below can read a sensible value
 775   _heap_start = g1h->reserved_region().start();
 776   set_non_marking_state();
 777   _completed_initialization = true;
 778 }
 779 
 780 void ConcurrentMark::reset() {
 781   // Starting values for these two. This should be called in a STW
 782   // phase.
 783   MemRegion reserved = _g1h->g1_reserved();
 784   _heap_start = reserved.start();
 785   _heap_end   = reserved.end();
 786 
 787   // Separated the asserts so that we know which one fires.
 788   assert(_heap_start != NULL, "heap bounds should look ok");
 789   assert(_heap_end != NULL, "heap bounds should look ok");
 790   assert(_heap_start < _heap_end, "heap bounds should look ok");
 791 
 792   // Reset all the marking data structures and any necessary flags
 793   reset_marking_state();
 794 
 795   if (verbose_low()) {
 796     gclog_or_tty->print_cr("[global] resetting");
 797   }
 798 
 799   // We do reset all of them, since different phases will use
 800   // different number of active threads. So, it's easiest to have all
 801   // of them ready.
 802   for (uint i = 0; i < _max_worker_id; ++i) {
 803     _tasks[i]->reset(_nextMarkBitMap);
 804   }
 805 
 806   // we need this to make sure that the flag is on during the evac
 807   // pause with initial mark piggy-backed
 808   set_concurrent_marking_in_progress();
 809 }
 810 
 811 
 812 void ConcurrentMark::reset_marking_state(bool clear_overflow) {
 813   _markStack.set_should_expand();
 814   _markStack.setEmpty();        // Also clears the _markStack overflow flag
 815   if (clear_overflow) {
 816     clear_has_overflown();
 817   } else {
 818     assert(has_overflown(), "pre-condition");
 819   }
 820   _finger = _heap_start;
 821 
 822   for (uint i = 0; i < _max_worker_id; ++i) {
 823     CMTaskQueue* queue = _task_queues->queue(i);
 824     queue->set_empty();
 825   }
 826 }
 827 
 828 void ConcurrentMark::set_concurrency(uint active_tasks) {
 829   assert(active_tasks <= _max_worker_id, "we should not have more");
 830 
 831   _active_tasks = active_tasks;
 832   // Need to update the three data structures below according to the
 833   // number of active threads for this phase.
 834   _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
 835   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 836   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 837 }
 838 
 839 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 840   set_concurrency(active_tasks);
 841 
 842   _concurrent = concurrent;
 843   // We propagate this to all tasks, not just the active ones.
 844   for (uint i = 0; i < _max_worker_id; ++i)
 845     _tasks[i]->set_concurrent(concurrent);
 846 
 847   if (concurrent) {
 848     set_concurrent_marking_in_progress();
 849   } else {
 850     // We currently assume that the concurrent flag has been set to
 851     // false before we start remark. At this point we should also be
 852     // in a STW phase.
 853     assert(!concurrent_marking_in_progress(), "invariant");
 854     assert(out_of_regions(),
 855            err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
 856                    p2i(_finger), p2i(_heap_end)));
 857   }
 858 }
 859 
 860 void ConcurrentMark::set_non_marking_state() {
 861   // We set the global marking state to some default values when we're
 862   // not doing marking.
 863   reset_marking_state();
 864   _active_tasks = 0;
 865   clear_concurrent_marking_in_progress();
 866 }
 867 
 868 ConcurrentMark::~ConcurrentMark() {
 869   // The ConcurrentMark instance is never freed.
 870   ShouldNotReachHere();
 871 }
 872 
 873 void ConcurrentMark::clearNextBitmap() {
 874   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 875 
 876   // Make sure that the concurrent mark thread looks to still be in
 877   // the current cycle.
 878   guarantee(cmThread()->during_cycle(), "invariant");
 879 
 880   // We are finishing up the current cycle by clearing the next
 881   // marking bitmap and getting it ready for the next cycle. During
 882   // this time no other cycle can start. So, let's make sure that this
 883   // is the case.
 884   guarantee(!g1h->mark_in_progress(), "invariant");
 885 
 886   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
 887   ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
 888   _parallel_workers->run_task(&task);
 889 
 890   // Clear the liveness counting data. If the marking has been aborted, the abort()
 891   // call already did that.
 892   if (cl.complete()) {
 893     clear_all_count_data();
 894   }
 895 
 896   // Repeat the asserts from above.
 897   guarantee(cmThread()->during_cycle(), "invariant");
 898   guarantee(!g1h->mark_in_progress(), "invariant");
 899 }
 900 
 901 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 902   CMBitMap* _bitmap;
 903   bool _error;
 904  public:
 905   CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
 906   }
 907 
 908   virtual bool doHeapRegion(HeapRegion* r) {
 909     // This closure can be called concurrently to the mutator, so we must make sure
 910     // that the result of the getNextMarkedWordAddress() call is compared to the
 911     // value passed to it as limit to detect any found bits.
 912     // We can use the region's orig_end() for the limit and the comparison value
 913     // as it always contains the "real" end of the region that never changes and
 914     // has no side effects.
 915     // Due to the latter, there can also be no problem with the compiler generating
 916     // reloads of the orig_end() call.
 917     HeapWord* end = r->orig_end();
 918     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
 919   }
 920 };
 921 
 922 bool ConcurrentMark::nextMarkBitmapIsClear() {
 923   CheckBitmapClearHRClosure cl(_nextMarkBitMap);
 924   _g1h->heap_region_iterate(&cl);
 925   return cl.complete();
 926 }
 927 
 928 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 929 public:
 930   bool doHeapRegion(HeapRegion* r) {
 931     if (!r->is_continues_humongous()) {
 932       r->note_start_of_marking();
 933     }
 934     return false;
 935   }
 936 };
 937 
 938 void ConcurrentMark::checkpointRootsInitialPre() {
 939   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 940   G1CollectorPolicy* g1p = g1h->g1_policy();
 941 
 942   _has_aborted = false;
 943 
 944 #ifndef PRODUCT
 945   if (G1PrintReachableAtInitialMark) {
 946     print_reachable("at-cycle-start",
 947                     VerifyOption_G1UsePrevMarking, true /* all */);
 948   }
 949 #endif
 950 
 951   // Initialize marking structures. This has to be done in a STW phase.
 952   reset();
 953 
 954   // For each region note start of marking.
 955   NoteStartOfMarkHRClosure startcl;
 956   g1h->heap_region_iterate(&startcl);
 957 }
 958 
 959 
 960 void ConcurrentMark::checkpointRootsInitialPost() {
 961   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 962 
 963   // If we force an overflow during remark, the remark operation will
 964   // actually abort and we'll restart concurrent marking. If we always
 965   // force an overflow during remark we'll never actually complete the
 966   // marking phase. So, we initialize this here, at the start of the
 967   // cycle, so that at the remaining overflow number will decrease at
 968   // every remark and we'll eventually not need to cause one.
 969   force_overflow_stw()->init();
 970 
 971   // Start Concurrent Marking weak-reference discovery.
 972   ReferenceProcessor* rp = g1h->ref_processor_cm();
 973   // enable ("weak") refs discovery
 974   rp->enable_discovery();
 975   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 976 
 977   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 978   // This is the start of  the marking cycle, we're expected all
 979   // threads to have SATB queues with active set to false.
 980   satb_mq_set.set_active_all_threads(true, /* new active value */
 981                                      false /* expected_active */);
 982 
 983   _root_regions.prepare_for_scan();
 984 
 985   // update_g1_committed() will be called at the end of an evac pause
 986   // when marking is on. So, it's also called at the end of the
 987   // initial-mark pause to update the heap end, if the heap expands
 988   // during it. No need to call it here.
 989 }
 990 
 991 /*
 992  * Notice that in the next two methods, we actually leave the STS
 993  * during the barrier sync and join it immediately afterwards. If we
 994  * do not do this, the following deadlock can occur: one thread could
 995  * be in the barrier sync code, waiting for the other thread to also
 996  * sync up, whereas another one could be trying to yield, while also
 997  * waiting for the other threads to sync up too.
 998  *
 999  * Note, however, that this code is also used during remark and in
1000  * this case we should not attempt to leave / enter the STS, otherwise
1001  * we'll either hit an assert (debug / fastdebug) or deadlock
1002  * (product). So we should only leave / enter the STS if we are
1003  * operating concurrently.
1004  *
1005  * Because the thread that does the sync barrier has left the STS, it
1006  * is possible to be suspended for a Full GC or an evacuation pause
1007  * could occur. This is actually safe, since the entering the sync
1008  * barrier is one of the last things do_marking_step() does, and it
1009  * doesn't manipulate any data structures afterwards.
1010  */
1011 
1012 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
1013   if (verbose_low()) {
1014     gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
1015   }
1016 
1017   if (concurrent()) {
1018     SuspendibleThreadSet::leave();
1019   }
1020 
1021   bool barrier_aborted = !_first_overflow_barrier_sync.enter();
1022 
1023   if (concurrent()) {
1024     SuspendibleThreadSet::join();
1025   }
1026   // at this point everyone should have synced up and not be doing any
1027   // more work
1028 
1029   if (verbose_low()) {
1030     if (barrier_aborted) {
1031       gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
1032     } else {
1033       gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
1034     }
1035   }
1036 
1037   if (barrier_aborted) {
1038     // If the barrier aborted we ignore the overflow condition and
1039     // just abort the whole marking phase as quickly as possible.
1040     return;
1041   }
1042 
1043   // If we're executing the concurrent phase of marking, reset the marking
1044   // state; otherwise the marking state is reset after reference processing,
1045   // during the remark pause.
1046   // If we reset here as a result of an overflow during the remark we will
1047   // see assertion failures from any subsequent set_concurrency_and_phase()
1048   // calls.
1049   if (concurrent()) {
1050     // let the task associated with with worker 0 do this
1051     if (worker_id == 0) {
1052       // task 0 is responsible for clearing the global data structures
1053       // We should be here because of an overflow. During STW we should
1054       // not clear the overflow flag since we rely on it being true when
1055       // we exit this method to abort the pause and restart concurrent
1056       // marking.
1057       reset_marking_state(true /* clear_overflow */);
1058       force_overflow()->update();
1059 
1060       if (G1Log::fine()) {
1061         gclog_or_tty->gclog_stamp(concurrent_gc_id());
1062         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
1063       }
1064     }
1065   }
1066 
1067   // after this, each task should reset its own data structures then
1068   // then go into the second barrier
1069 }
1070 
1071 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
1072   if (verbose_low()) {
1073     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1074   }
1075 
1076   if (concurrent()) {
1077     SuspendibleThreadSet::leave();
1078   }
1079 
1080   bool barrier_aborted = !_second_overflow_barrier_sync.enter();
1081 
1082   if (concurrent()) {
1083     SuspendibleThreadSet::join();
1084   }
1085   // at this point everything should be re-initialized and ready to go
1086 
1087   if (verbose_low()) {
1088     if (barrier_aborted) {
1089       gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
1090     } else {
1091       gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
1092     }
1093   }
1094 }
1095 
1096 #ifndef PRODUCT
1097 void ForceOverflowSettings::init() {
1098   _num_remaining = G1ConcMarkForceOverflow;
1099   _force = false;
1100   update();
1101 }
1102 
1103 void ForceOverflowSettings::update() {
1104   if (_num_remaining > 0) {
1105     _num_remaining -= 1;
1106     _force = true;
1107   } else {
1108     _force = false;
1109   }
1110 }
1111 
1112 bool ForceOverflowSettings::should_force() {
1113   if (_force) {
1114     _force = false;
1115     return true;
1116   } else {
1117     return false;
1118   }
1119 }
1120 #endif // !PRODUCT
1121 
1122 class CMConcurrentMarkingTask: public AbstractGangTask {
1123 private:
1124   ConcurrentMark*       _cm;
1125   ConcurrentMarkThread* _cmt;
1126 
1127 public:
1128   void work(uint worker_id) {
1129     assert(Thread::current()->is_ConcurrentGC_thread(),
1130            "this should only be done by a conc GC thread");
1131     ResourceMark rm;
1132 
1133     double start_vtime = os::elapsedVTime();
1134 
1135     SuspendibleThreadSet::join();
1136 
1137     assert(worker_id < _cm->active_tasks(), "invariant");
1138     CMTask* the_task = _cm->task(worker_id);
1139     the_task->record_start_time();
1140     if (!_cm->has_aborted()) {
1141       do {
1142         double start_vtime_sec = os::elapsedVTime();
1143         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1144 
1145         the_task->do_marking_step(mark_step_duration_ms,
1146                                   true  /* do_termination */,
1147                                   false /* is_serial*/);
1148 
1149         double end_vtime_sec = os::elapsedVTime();
1150         double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
1151         _cm->clear_has_overflown();
1152 
1153         _cm->do_yield_check(worker_id);
1154 
1155         jlong sleep_time_ms;
1156         if (!_cm->has_aborted() && the_task->has_aborted()) {
1157           sleep_time_ms =
1158             (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
1159           SuspendibleThreadSet::leave();
1160           os::sleep(Thread::current(), sleep_time_ms, false);
1161           SuspendibleThreadSet::join();
1162         }
1163       } while (!_cm->has_aborted() && the_task->has_aborted());
1164     }
1165     the_task->record_end_time();
1166     guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1167 
1168     SuspendibleThreadSet::leave();
1169 
1170     double end_vtime = os::elapsedVTime();
1171     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
1172   }
1173 
1174   CMConcurrentMarkingTask(ConcurrentMark* cm,
1175                           ConcurrentMarkThread* cmt) :
1176       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1177 
1178   ~CMConcurrentMarkingTask() { }
1179 };
1180 
1181 // Calculates the number of active workers for a concurrent
1182 // phase.
1183 uint ConcurrentMark::calc_parallel_marking_threads() {
1184   uint n_conc_workers = 0;
1185   if (!UseDynamicNumberOfGCThreads ||
1186       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
1187        !ForceDynamicNumberOfGCThreads)) {
1188     n_conc_workers = max_parallel_marking_threads();
1189   } else {
1190     n_conc_workers =
1191       AdaptiveSizePolicy::calc_default_active_workers(
1192                                    max_parallel_marking_threads(),
1193                                    1, /* Minimum workers */
1194                                    parallel_marking_threads(),
1195                                    Threads::number_of_non_daemon_threads());
1196     // Don't scale down "n_conc_workers" by scale_parallel_threads() because
1197     // that scaling has already gone into "_max_parallel_marking_threads".
1198   }
1199   assert(n_conc_workers > 0, "Always need at least 1");
1200   return n_conc_workers;
1201 }
1202 
1203 void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
1204   // Currently, only survivors can be root regions.
1205   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
1206   G1RootRegionScanClosure cl(_g1h, this, worker_id);
1207 
1208   const uintx interval = PrefetchScanIntervalInBytes;
1209   HeapWord* curr = hr->bottom();
1210   const HeapWord* end = hr->top();
1211   while (curr < end) {
1212     Prefetch::read(curr, interval);
1213     oop obj = oop(curr);
1214     int size = obj->oop_iterate(&cl);
1215     assert(size == obj->size(), "sanity");
1216     curr += size;
1217   }
1218 }
1219 
1220 class CMRootRegionScanTask : public AbstractGangTask {
1221 private:
1222   ConcurrentMark* _cm;
1223 
1224 public:
1225   CMRootRegionScanTask(ConcurrentMark* cm) :
1226     AbstractGangTask("Root Region Scan"), _cm(cm) { }
1227 
1228   void work(uint worker_id) {
1229     assert(Thread::current()->is_ConcurrentGC_thread(),
1230            "this should only be done by a conc GC thread");
1231 
1232     CMRootRegions* root_regions = _cm->root_regions();
1233     HeapRegion* hr = root_regions->claim_next();
1234     while (hr != NULL) {
1235       _cm->scanRootRegion(hr, worker_id);
1236       hr = root_regions->claim_next();
1237     }
1238   }
1239 };
1240 
1241 void ConcurrentMark::scanRootRegions() {
1242   // Start of concurrent marking.
1243   ClassLoaderDataGraph::clear_claimed_marks();
1244 
1245   // scan_in_progress() will have been set to true only if there was
1246   // at least one root region to scan. So, if it's false, we
1247   // should not attempt to do any further work.
1248   if (root_regions()->scan_in_progress()) {
1249     _parallel_marking_threads = calc_parallel_marking_threads();
1250     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1251            "Maximum number of marking threads exceeded");
1252     uint active_workers = MAX2(1U, parallel_marking_threads());
1253 
1254     CMRootRegionScanTask task(this);
1255     _parallel_workers->set_active_workers(active_workers);
1256     _parallel_workers->run_task(&task);
1257 
1258     // It's possible that has_aborted() is true here without actually
1259     // aborting the survivor scan earlier. This is OK as it's
1260     // mainly used for sanity checking.
1261     root_regions()->scan_finished();
1262   }
1263 }
1264 
1265 void ConcurrentMark::markFromRoots() {
1266   // we might be tempted to assert that:
1267   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1268   //        "inconsistent argument?");
1269   // However that wouldn't be right, because it's possible that
1270   // a safepoint is indeed in progress as a younger generation
1271   // stop-the-world GC happens even as we mark in this generation.
1272 
1273   _restart_for_overflow = false;
1274   force_overflow_conc()->init();
1275 
1276   // _g1h has _n_par_threads
1277   _parallel_marking_threads = calc_parallel_marking_threads();
1278   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1279     "Maximum number of marking threads exceeded");
1280 
1281   uint active_workers = MAX2(1U, parallel_marking_threads());
1282 
1283   // Parallel task terminator is set in "set_concurrency_and_phase()"
1284   set_concurrency_and_phase(active_workers, true /* concurrent */);
1285 
1286   CMConcurrentMarkingTask markingTask(this, cmThread());
1287   _parallel_workers->set_active_workers(active_workers);
1288   // Don't set _n_par_threads because it affects MT in process_roots()
1289   // and the decisions on that MT processing is made elsewhere.
1290   assert(_parallel_workers->active_workers() > 0, "Should have been set");
1291   _parallel_workers->run_task(&markingTask);
1292   print_stats();
1293 }
1294 
1295 // Helper class to get rid of some boilerplate code.
1296 class G1CMTraceTime : public GCTraceTime {
1297   static bool doit_and_prepend(bool doit) {
1298     if (doit) {
1299       gclog_or_tty->put(' ');
1300     }
1301     return doit;
1302   }
1303 
1304  public:
1305   G1CMTraceTime(const char* title, bool doit)
1306     : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
1307         G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
1308   }
1309 };
1310 
1311 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1312   // world is stopped at this checkpoint
1313   assert(SafepointSynchronize::is_at_safepoint(),
1314          "world should be stopped");
1315 
1316   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1317 
1318   // If a full collection has happened, we shouldn't do this.
1319   if (has_aborted()) {
1320     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1321     return;
1322   }
1323 
1324   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1325 
1326   if (VerifyDuringGC) {
1327     HandleMark hm;  // handle scope
1328     Universe::heap()->prepare_for_verify();
1329     Universe::verify(VerifyOption_G1UsePrevMarking,
1330                      " VerifyDuringGC:(before)");
1331   }
1332   g1h->check_bitmaps("Remark Start");
1333 
1334   G1CollectorPolicy* g1p = g1h->g1_policy();
1335   g1p->record_concurrent_mark_remark_start();
1336 
1337   double start = os::elapsedTime();
1338 
1339   checkpointRootsFinalWork();
1340 
1341   double mark_work_end = os::elapsedTime();
1342 
1343   weakRefsWork(clear_all_soft_refs);
1344 
1345   if (has_overflown()) {
1346     // Oops.  We overflowed.  Restart concurrent marking.
1347     _restart_for_overflow = true;
1348     if (G1TraceMarkStackOverflow) {
1349       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1350     }
1351 
1352     // Verify the heap w.r.t. the previous marking bitmap.
1353     if (VerifyDuringGC) {
1354       HandleMark hm;  // handle scope
1355       Universe::heap()->prepare_for_verify();
1356       Universe::verify(VerifyOption_G1UsePrevMarking,
1357                        " VerifyDuringGC:(overflow)");
1358     }
1359 
1360     // Clear the marking state because we will be restarting
1361     // marking due to overflowing the global mark stack.
1362     reset_marking_state();
1363   } else {
1364     {
1365       G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
1366 
1367       // Aggregate the per-task counting data that we have accumulated
1368       // while marking.
1369       aggregate_count_data();
1370     }
1371 
1372     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1373     // We're done with marking.
1374     // This is the end of  the marking cycle, we're expected all
1375     // threads to have SATB queues with active set to true.
1376     satb_mq_set.set_active_all_threads(false, /* new active value */
1377                                        true /* expected_active */);
1378 
1379     if (VerifyDuringGC) {
1380       HandleMark hm;  // handle scope
1381       Universe::heap()->prepare_for_verify();
1382       Universe::verify(VerifyOption_G1UseNextMarking,
1383                        " VerifyDuringGC:(after)");
1384     }
1385     g1h->check_bitmaps("Remark End");
1386     assert(!restart_for_overflow(), "sanity");
1387     // Completely reset the marking state since marking completed
1388     set_non_marking_state();
1389   }
1390 
1391   // Expand the marking stack, if we have to and if we can.
1392   if (_markStack.should_expand()) {
1393     _markStack.expand();
1394   }
1395 
1396   // Statistics
1397   double now = os::elapsedTime();
1398   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1399   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1400   _remark_times.add((now - start) * 1000.0);
1401 
1402   g1p->record_concurrent_mark_remark_end();
1403 
1404   G1CMIsAliveClosure is_alive(g1h);
1405   g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1406 }
1407 
1408 // Base class of the closures that finalize and verify the
1409 // liveness counting data.
1410 class CMCountDataClosureBase: public HeapRegionClosure {
1411 protected:
1412   G1CollectedHeap* _g1h;
1413   ConcurrentMark* _cm;
1414   CardTableModRefBS* _ct_bs;
1415 
1416   BitMap* _region_bm;
1417   BitMap* _card_bm;
1418 
1419   // Takes a region that's not empty (i.e., it has at least one
1420   // live object in it and sets its corresponding bit on the region
1421   // bitmap to 1. If the region is "starts humongous" it will also set
1422   // to 1 the bits on the region bitmap that correspond to its
1423   // associated "continues humongous" regions.
1424   void set_bit_for_region(HeapRegion* hr) {
1425     assert(!hr->is_continues_humongous(), "should have filtered those out");
1426 
1427     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1428     if (!hr->is_starts_humongous()) {
1429       // Normal (non-humongous) case: just set the bit.
1430       _region_bm->par_at_put(index, true);
1431     } else {
1432       // Starts humongous case: calculate how many regions are part of
1433       // this humongous region and then set the bit range.
1434       BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
1435       _region_bm->par_at_put_range(index, end_index, true);
1436     }
1437   }
1438 
1439 public:
1440   CMCountDataClosureBase(G1CollectedHeap* g1h,
1441                          BitMap* region_bm, BitMap* card_bm):
1442     _g1h(g1h), _cm(g1h->concurrent_mark()),
1443     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
1444     _region_bm(region_bm), _card_bm(card_bm) { }
1445 };
1446 
1447 // Closure that calculates the # live objects per region. Used
1448 // for verification purposes during the cleanup pause.
1449 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1450   CMBitMapRO* _bm;
1451   size_t _region_marked_bytes;
1452 
1453 public:
1454   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1455                          BitMap* region_bm, BitMap* card_bm) :
1456     CMCountDataClosureBase(g1h, region_bm, card_bm),
1457     _bm(bm), _region_marked_bytes(0) { }
1458 
1459   bool doHeapRegion(HeapRegion* hr) {
1460 
1461     if (hr->is_continues_humongous()) {
1462       // We will ignore these here and process them when their
1463       // associated "starts humongous" region is processed (see
1464       // set_bit_for_heap_region()). Note that we cannot rely on their
1465       // associated "starts humongous" region to have their bit set to
1466       // 1 since, due to the region chunking in the parallel region
1467       // iteration, a "continues humongous" region might be visited
1468       // before its associated "starts humongous".
1469       return false;
1470     }
1471 
1472     HeapWord* ntams = hr->next_top_at_mark_start();
1473     HeapWord* start = hr->bottom();
1474 
1475     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1476            err_msg("Preconditions not met - "
1477                    "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1478                    p2i(start), p2i(ntams), p2i(hr->end())));
1479 
1480     // Find the first marked object at or after "start".
1481     start = _bm->getNextMarkedWordAddress(start, ntams);
1482 
1483     size_t marked_bytes = 0;
1484 
1485     while (start < ntams) {
1486       oop obj = oop(start);
1487       int obj_sz = obj->size();
1488       HeapWord* obj_end = start + obj_sz;
1489 
1490       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1491       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1492 
1493       // Note: if we're looking at the last region in heap - obj_end
1494       // could be actually just beyond the end of the heap; end_idx
1495       // will then correspond to a (non-existent) card that is also
1496       // just beyond the heap.
1497       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1498         // end of object is not card aligned - increment to cover
1499         // all the cards spanned by the object
1500         end_idx += 1;
1501       }
1502 
1503       // Set the bits in the card BM for the cards spanned by this object.
1504       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1505 
1506       // Add the size of this object to the number of marked bytes.
1507       marked_bytes += (size_t)obj_sz * HeapWordSize;
1508 
1509       // Find the next marked object after this one.
1510       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1511     }
1512 
1513     // Mark the allocated-since-marking portion...
1514     HeapWord* top = hr->top();
1515     if (ntams < top) {
1516       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1517       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1518 
1519       // Note: if we're looking at the last region in heap - top
1520       // could be actually just beyond the end of the heap; end_idx
1521       // will then correspond to a (non-existent) card that is also
1522       // just beyond the heap.
1523       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1524         // end of object is not card aligned - increment to cover
1525         // all the cards spanned by the object
1526         end_idx += 1;
1527       }
1528       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1529 
1530       // This definitely means the region has live objects.
1531       set_bit_for_region(hr);
1532     }
1533 
1534     // Update the live region bitmap.
1535     if (marked_bytes > 0) {
1536       set_bit_for_region(hr);
1537     }
1538 
1539     // Set the marked bytes for the current region so that
1540     // it can be queried by a calling verification routine
1541     _region_marked_bytes = marked_bytes;
1542 
1543     return false;
1544   }
1545 
1546   size_t region_marked_bytes() const { return _region_marked_bytes; }
1547 };
1548 
1549 // Heap region closure used for verifying the counting data
1550 // that was accumulated concurrently and aggregated during
1551 // the remark pause. This closure is applied to the heap
1552 // regions during the STW cleanup pause.
1553 
1554 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1555   G1CollectedHeap* _g1h;
1556   ConcurrentMark* _cm;
1557   CalcLiveObjectsClosure _calc_cl;
1558   BitMap* _region_bm;   // Region BM to be verified
1559   BitMap* _card_bm;     // Card BM to be verified
1560   bool _verbose;        // verbose output?
1561 
1562   BitMap* _exp_region_bm; // Expected Region BM values
1563   BitMap* _exp_card_bm;   // Expected card BM values
1564 
1565   int _failures;
1566 
1567 public:
1568   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1569                                 BitMap* region_bm,
1570                                 BitMap* card_bm,
1571                                 BitMap* exp_region_bm,
1572                                 BitMap* exp_card_bm,
1573                                 bool verbose) :
1574     _g1h(g1h), _cm(g1h->concurrent_mark()),
1575     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1576     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1577     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1578     _failures(0) { }
1579 
1580   int failures() const { return _failures; }
1581 
1582   bool doHeapRegion(HeapRegion* hr) {
1583     if (hr->is_continues_humongous()) {
1584       // We will ignore these here and process them when their
1585       // associated "starts humongous" region is processed (see
1586       // set_bit_for_heap_region()). Note that we cannot rely on their
1587       // associated "starts humongous" region to have their bit set to
1588       // 1 since, due to the region chunking in the parallel region
1589       // iteration, a "continues humongous" region might be visited
1590       // before its associated "starts humongous".
1591       return false;
1592     }
1593 
1594     int failures = 0;
1595 
1596     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1597     // this region and set the corresponding bits in the expected region
1598     // and card bitmaps.
1599     bool res = _calc_cl.doHeapRegion(hr);
1600     assert(res == false, "should be continuing");
1601 
1602     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1603                     Mutex::_no_safepoint_check_flag);
1604 
1605     // Verify the marked bytes for this region.
1606     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1607     size_t act_marked_bytes = hr->next_marked_bytes();
1608 
1609     // We're not OK if expected marked bytes > actual marked bytes. It means
1610     // we have missed accounting some objects during the actual marking.
1611     if (exp_marked_bytes > act_marked_bytes) {
1612       if (_verbose) {
1613         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1614                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1615                                hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
1616       }
1617       failures += 1;
1618     }
1619 
1620     // Verify the bit, for this region, in the actual and expected
1621     // (which was just calculated) region bit maps.
1622     // We're not OK if the bit in the calculated expected region
1623     // bitmap is set and the bit in the actual region bitmap is not.
1624     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1625 
1626     bool expected = _exp_region_bm->at(index);
1627     bool actual = _region_bm->at(index);
1628     if (expected && !actual) {
1629       if (_verbose) {
1630         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1631                                "expected: %s, actual: %s",
1632                                hr->hrm_index(),
1633                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1634       }
1635       failures += 1;
1636     }
1637 
1638     // Verify that the card bit maps for the cards spanned by the current
1639     // region match. We have an error if we have a set bit in the expected
1640     // bit map and the corresponding bit in the actual bitmap is not set.
1641 
1642     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1643     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1644 
1645     for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
1646       expected = _exp_card_bm->at(i);
1647       actual = _card_bm->at(i);
1648 
1649       if (expected && !actual) {
1650         if (_verbose) {
1651           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1652                                  "expected: %s, actual: %s",
1653                                  hr->hrm_index(), i,
1654                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1655         }
1656         failures += 1;
1657       }
1658     }
1659 
1660     if (failures > 0 && _verbose)  {
1661       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1662                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1663                              HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
1664                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1665     }
1666 
1667     _failures += failures;
1668 
1669     // We could stop iteration over the heap when we
1670     // find the first violating region by returning true.
1671     return false;
1672   }
1673 };
1674 
1675 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1676 protected:
1677   G1CollectedHeap* _g1h;
1678   ConcurrentMark* _cm;
1679   BitMap* _actual_region_bm;
1680   BitMap* _actual_card_bm;
1681 
1682   uint    _n_workers;
1683 
1684   BitMap* _expected_region_bm;
1685   BitMap* _expected_card_bm;
1686 
1687   int  _failures;
1688   bool _verbose;
1689 
1690   HeapRegionClaimer _hrclaimer;
1691 
1692 public:
1693   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1694                             BitMap* region_bm, BitMap* card_bm,
1695                             BitMap* expected_region_bm, BitMap* expected_card_bm)
1696     : AbstractGangTask("G1 verify final counting"),
1697       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1698       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1699       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1700       _failures(0), _verbose(false),
1701       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1702     assert(VerifyDuringGC, "don't call this otherwise");
1703     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1704     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1705 
1706     _verbose = _cm->verbose_medium();
1707   }
1708 
1709   void work(uint worker_id) {
1710     assert(worker_id < _n_workers, "invariant");
1711 
1712     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1713                                             _actual_region_bm, _actual_card_bm,
1714                                             _expected_region_bm,
1715                                             _expected_card_bm,
1716                                             _verbose);
1717 
1718     _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);
1719 
1720     Atomic::add(verify_cl.failures(), &_failures);
1721   }
1722 
1723   int failures() const { return _failures; }
1724 };
1725 
1726 // Closure that finalizes the liveness counting data.
1727 // Used during the cleanup pause.
1728 // Sets the bits corresponding to the interval [NTAMS, top]
1729 // (which contains the implicitly live objects) in the
1730 // card liveness bitmap. Also sets the bit for each region,
1731 // containing live data, in the region liveness bitmap.
1732 
1733 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1734  public:
1735   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1736                               BitMap* region_bm,
1737                               BitMap* card_bm) :
1738     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1739 
1740   bool doHeapRegion(HeapRegion* hr) {
1741 
1742     if (hr->is_continues_humongous()) {
1743       // We will ignore these here and process them when their
1744       // associated "starts humongous" region is processed (see
1745       // set_bit_for_heap_region()). Note that we cannot rely on their
1746       // associated "starts humongous" region to have their bit set to
1747       // 1 since, due to the region chunking in the parallel region
1748       // iteration, a "continues humongous" region might be visited
1749       // before its associated "starts humongous".
1750       return false;
1751     }
1752 
1753     HeapWord* ntams = hr->next_top_at_mark_start();
1754     HeapWord* top   = hr->top();
1755 
1756     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1757 
1758     // Mark the allocated-since-marking portion...
1759     if (ntams < top) {
1760       // This definitely means the region has live objects.
1761       set_bit_for_region(hr);
1762 
1763       // Now set the bits in the card bitmap for [ntams, top)
1764       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1765       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1766 
1767       // Note: if we're looking at the last region in heap - top
1768       // could be actually just beyond the end of the heap; end_idx
1769       // will then correspond to a (non-existent) card that is also
1770       // just beyond the heap.
1771       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1772         // end of object is not card aligned - increment to cover
1773         // all the cards spanned by the object
1774         end_idx += 1;
1775       }
1776 
1777       assert(end_idx <= _card_bm->size(),
1778              err_msg("oob: end_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1779                      end_idx, _card_bm->size()));
1780       assert(start_idx < _card_bm->size(),
1781              err_msg("oob: start_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1782                      start_idx, _card_bm->size()));
1783 
1784       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1785     }
1786 
1787     // Set the bit for the region if it contains live data
1788     if (hr->next_marked_bytes() > 0) {
1789       set_bit_for_region(hr);
1790     }
1791 
1792     return false;
1793   }
1794 };
1795 
1796 class G1ParFinalCountTask: public AbstractGangTask {
1797 protected:
1798   G1CollectedHeap* _g1h;
1799   ConcurrentMark* _cm;
1800   BitMap* _actual_region_bm;
1801   BitMap* _actual_card_bm;
1802 
1803   uint    _n_workers;
1804   HeapRegionClaimer _hrclaimer;
1805 
1806 public:
1807   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1808     : AbstractGangTask("G1 final counting"),
1809       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1810       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1811       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1812   }
1813 
1814   void work(uint worker_id) {
1815     assert(worker_id < _n_workers, "invariant");
1816 
1817     FinalCountDataUpdateClosure final_update_cl(_g1h,
1818                                                 _actual_region_bm,
1819                                                 _actual_card_bm);
1820 
1821     _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
1822   }
1823 };
1824 
1825 class G1ParNoteEndTask;
1826 
1827 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1828   G1CollectedHeap* _g1;
1829   size_t _max_live_bytes;
1830   uint _regions_claimed;
1831   size_t _freed_bytes;
1832   FreeRegionList* _local_cleanup_list;
1833   HeapRegionSetCount _old_regions_removed;
1834   HeapRegionSetCount _humongous_regions_removed;
1835   HRRSCleanupTask* _hrrs_cleanup_task;
1836   double _claimed_region_time;
1837   double _max_region_time;
1838 
1839 public:
1840   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1841                              FreeRegionList* local_cleanup_list,
1842                              HRRSCleanupTask* hrrs_cleanup_task) :
1843     _g1(g1),
1844     _max_live_bytes(0), _regions_claimed(0),
1845     _freed_bytes(0),
1846     _claimed_region_time(0.0), _max_region_time(0.0),
1847     _local_cleanup_list(local_cleanup_list),
1848     _old_regions_removed(),
1849     _humongous_regions_removed(),
1850     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1851 
1852   size_t freed_bytes() { return _freed_bytes; }
1853   const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1854   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1855 
1856   bool doHeapRegion(HeapRegion *hr) {
1857     if (hr->is_continues_humongous()) {
1858       return false;
1859     }
1860     // We use a claim value of zero here because all regions
1861     // were claimed with value 1 in the FinalCount task.
1862     _g1->reset_gc_time_stamps(hr);
1863     double start = os::elapsedTime();
1864     _regions_claimed++;
1865     hr->note_end_of_marking();
1866     _max_live_bytes += hr->max_live_bytes();
1867 
1868     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1869       _freed_bytes += hr->used();
1870       hr->set_containing_set(NULL);
1871       if (hr->is_humongous()) {
1872         assert(hr->is_starts_humongous(), "we should only see starts humongous");
1873         _humongous_regions_removed.increment(1u, hr->capacity());
1874         _g1->free_humongous_region(hr, _local_cleanup_list, true);
1875       } else {
1876         _old_regions_removed.increment(1u, hr->capacity());
1877         _g1->free_region(hr, _local_cleanup_list, true);
1878       }
1879     } else {
1880       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1881     }
1882 
1883     double region_time = (os::elapsedTime() - start);
1884     _claimed_region_time += region_time;
1885     if (region_time > _max_region_time) {
1886       _max_region_time = region_time;
1887     }
1888     return false;
1889   }
1890 
1891   size_t max_live_bytes() { return _max_live_bytes; }
1892   uint regions_claimed() { return _regions_claimed; }
1893   double claimed_region_time_sec() { return _claimed_region_time; }
1894   double max_region_time_sec() { return _max_region_time; }
1895 };
1896 
1897 class G1ParNoteEndTask: public AbstractGangTask {
1898   friend class G1NoteEndOfConcMarkClosure;
1899 
1900 protected:
1901   G1CollectedHeap* _g1h;
1902   size_t _max_live_bytes;
1903   size_t _freed_bytes;
1904   FreeRegionList* _cleanup_list;
1905   HeapRegionClaimer _hrclaimer;
1906 
1907 public:
1908   G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1909       AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
1910   }
1911 
1912   void work(uint worker_id) {
1913     FreeRegionList local_cleanup_list("Local Cleanup List");
1914     HRRSCleanupTask hrrs_cleanup_task;
1915     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1916                                            &hrrs_cleanup_task);
1917     _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
1918     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1919 
1920     // Now update the lists
1921     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1922     {
1923       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1924       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1925       _max_live_bytes += g1_note_end.max_live_bytes();
1926       _freed_bytes += g1_note_end.freed_bytes();
1927 
1928       // If we iterate over the global cleanup list at the end of
1929       // cleanup to do this printing we will not guarantee to only
1930       // generate output for the newly-reclaimed regions (the list
1931       // might not be empty at the beginning of cleanup; we might
1932       // still be working on its previous contents). So we do the
1933       // printing here, before we append the new regions to the global
1934       // cleanup list.
1935 
1936       G1HRPrinter* hr_printer = _g1h->hr_printer();
1937       if (hr_printer->is_active()) {
1938         FreeRegionListIterator iter(&local_cleanup_list);
1939         while (iter.more_available()) {
1940           HeapRegion* hr = iter.get_next();
1941           hr_printer->cleanup(hr);
1942         }
1943       }
1944 
1945       _cleanup_list->add_ordered(&local_cleanup_list);
1946       assert(local_cleanup_list.is_empty(), "post-condition");
1947 
1948       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1949     }
1950   }
1951   size_t max_live_bytes() { return _max_live_bytes; }
1952   size_t freed_bytes() { return _freed_bytes; }
1953 };
1954 
1955 class G1ParScrubRemSetTask: public AbstractGangTask {
1956 protected:
1957   G1RemSet* _g1rs;
1958   BitMap* _region_bm;
1959   BitMap* _card_bm;
1960   HeapRegionClaimer _hrclaimer;
1961 
1962 public:
1963   G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
1964       AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) {
1965   }
1966 
1967   void work(uint worker_id) {
1968     _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer);
1969   }
1970 
1971 };
1972 
1973 void ConcurrentMark::cleanup() {
1974   // world is stopped at this checkpoint
1975   assert(SafepointSynchronize::is_at_safepoint(),
1976          "world should be stopped");
1977   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1978 
1979   // If a full collection has happened, we shouldn't do this.
1980   if (has_aborted()) {
1981     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1982     return;
1983   }
1984 
1985   g1h->verify_region_sets_optional();
1986 
1987   if (VerifyDuringGC) {
1988     HandleMark hm;  // handle scope
1989     Universe::heap()->prepare_for_verify();
1990     Universe::verify(VerifyOption_G1UsePrevMarking,
1991                      " VerifyDuringGC:(before)");
1992   }
1993   g1h->check_bitmaps("Cleanup Start");
1994 
1995   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1996   g1p->record_concurrent_mark_cleanup_start();
1997 
1998   double start = os::elapsedTime();
1999 
2000   HeapRegionRemSet::reset_for_cleanup_tasks();
2001 
2002   uint n_workers;
2003 
2004   // Do counting once more with the world stopped for good measure.
2005   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
2006 
2007   g1h->set_par_threads();
2008   n_workers = g1h->n_par_threads();
2009   assert(g1h->n_par_threads() == n_workers,
2010          "Should not have been reset");
2011   g1h->workers()->run_task(&g1_par_count_task);
2012   // Done with the parallel phase so reset to 0.
2013   g1h->set_par_threads(0);
2014 
2015   if (VerifyDuringGC) {
2016     // Verify that the counting data accumulated during marking matches
2017     // that calculated by walking the marking bitmap.
2018 
2019     // Bitmaps to hold expected values
2020     BitMap expected_region_bm(_region_bm.size(), true);
2021     BitMap expected_card_bm(_card_bm.size(), true);
2022 
2023     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2024                                                  &_region_bm,
2025                                                  &_card_bm,
2026                                                  &expected_region_bm,
2027                                                  &expected_card_bm);
2028 
2029     g1h->set_par_threads((int)n_workers);
2030     g1h->workers()->run_task(&g1_par_verify_task);
2031     // Done with the parallel phase so reset to 0.
2032     g1h->set_par_threads(0);
2033 
2034     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2035   }
2036 
2037   size_t start_used_bytes = g1h->used();
2038   g1h->set_marking_complete();
2039 
2040   double count_end = os::elapsedTime();
2041   double this_final_counting_time = (count_end - start);
2042   _total_counting_time += this_final_counting_time;
2043 
2044   if (G1PrintRegionLivenessInfo) {
2045     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2046     _g1h->heap_region_iterate(&cl);
2047   }
2048 
2049   // Install newly created mark bitMap as "prev".
2050   swapMarkBitMaps();
2051 
2052   g1h->reset_gc_time_stamp();
2053 
2054   // Note end of marking in all heap regions.
2055   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
2056   g1h->set_par_threads((int)n_workers);
2057   g1h->workers()->run_task(&g1_par_note_end_task);
2058   g1h->set_par_threads(0);
2059   g1h->check_gc_time_stamps();
2060 
2061   if (!cleanup_list_is_empty()) {
2062     // The cleanup list is not empty, so we'll have to process it
2063     // concurrently. Notify anyone else that might be wanting free
2064     // regions that there will be more free regions coming soon.
2065     g1h->set_free_regions_coming();
2066   }
2067 
2068   // call below, since it affects the metric by which we sort the heap
2069   // regions.
2070   if (G1ScrubRemSets) {
2071     double rs_scrub_start = os::elapsedTime();
2072     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
2073     g1h->set_par_threads((int)n_workers);
2074     g1h->workers()->run_task(&g1_par_scrub_rs_task);
2075     g1h->set_par_threads(0);
2076 
2077     double rs_scrub_end = os::elapsedTime();
2078     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2079     _total_rs_scrub_time += this_rs_scrub_time;
2080   }
2081 
2082   // this will also free any regions totally full of garbage objects,
2083   // and sort the regions.
2084   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2085 
2086   // Statistics.
2087   double end = os::elapsedTime();
2088   _cleanup_times.add((end - start) * 1000.0);
2089 
2090   if (G1Log::fine()) {
2091     g1h->print_size_transition(gclog_or_tty,
2092                                start_used_bytes,
2093                                g1h->used(),
2094                                g1h->capacity());
2095   }
2096 
2097   // Clean up will have freed any regions completely full of garbage.
2098   // Update the soft reference policy with the new heap occupancy.
2099   Universe::update_heap_info_at_gc();
2100 
2101   if (VerifyDuringGC) {
2102     HandleMark hm;  // handle scope
2103     Universe::heap()->prepare_for_verify();
2104     Universe::verify(VerifyOption_G1UsePrevMarking,
2105                      " VerifyDuringGC:(after)");
2106   }
2107 
2108   g1h->check_bitmaps("Cleanup End");
2109 
2110   g1h->verify_region_sets_optional();
2111 
2112   // We need to make this be a "collection" so any collection pause that
2113   // races with it goes around and waits for completeCleanup to finish.
2114   g1h->increment_total_collections();
2115 
2116   // Clean out dead classes and update Metaspace sizes.
2117   if (ClassUnloadingWithConcurrentMark) {
2118     ClassLoaderDataGraph::purge();
2119   }
2120   MetaspaceGC::compute_new_size();
2121 
2122   // We reclaimed old regions so we should calculate the sizes to make
2123   // sure we update the old gen/space data.
2124   g1h->g1mm()->update_sizes();
2125   g1h->allocation_context_stats().update_after_mark();
2126 
2127   g1h->trace_heap_after_concurrent_cycle();
2128 }
2129 
2130 void ConcurrentMark::completeCleanup() {
2131   if (has_aborted()) return;
2132 
2133   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2134 
2135   _cleanup_list.verify_optional();
2136   FreeRegionList tmp_free_list("Tmp Free List");
2137 
2138   if (G1ConcRegionFreeingVerbose) {
2139     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2140                            "cleanup list has %u entries",
2141                            _cleanup_list.length());
2142   }
2143 
2144   // No one else should be accessing the _cleanup_list at this point,
2145   // so it is not necessary to take any locks
2146   while (!_cleanup_list.is_empty()) {
2147     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2148     assert(hr != NULL, "Got NULL from a non-empty list");
2149     hr->par_clear();
2150     tmp_free_list.add_ordered(hr);
2151 
2152     // Instead of adding one region at a time to the secondary_free_list,
2153     // we accumulate them in the local list and move them a few at a
2154     // time. This also cuts down on the number of notify_all() calls
2155     // we do during this process. We'll also append the local list when
2156     // _cleanup_list is empty (which means we just removed the last
2157     // region from the _cleanup_list).
2158     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2159         _cleanup_list.is_empty()) {
2160       if (G1ConcRegionFreeingVerbose) {
2161         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2162                                "appending %u entries to the secondary_free_list, "
2163                                "cleanup list still has %u entries",
2164                                tmp_free_list.length(),
2165                                _cleanup_list.length());
2166       }
2167 
2168       {
2169         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2170         g1h->secondary_free_list_add(&tmp_free_list);
2171         SecondaryFreeList_lock->notify_all();
2172       }
2173 
2174       if (G1StressConcRegionFreeing) {
2175         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2176           os::sleep(Thread::current(), (jlong) 1, false);
2177         }
2178       }
2179     }
2180   }
2181   assert(tmp_free_list.is_empty(), "post-condition");
2182 }
2183 
2184 // Supporting Object and Oop closures for reference discovery
2185 // and processing in during marking
2186 
2187 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2188   HeapWord* addr = (HeapWord*)obj;
2189   return addr != NULL &&
2190          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2191 }
2192 
2193 // 'Keep Alive' oop closure used by both serial parallel reference processing.
2194 // Uses the CMTask associated with a worker thread (for serial reference
2195 // processing the CMTask for worker 0 is used) to preserve (mark) and
2196 // trace referent objects.
2197 //
2198 // Using the CMTask and embedded local queues avoids having the worker
2199 // threads operating on the global mark stack. This reduces the risk
2200 // of overflowing the stack - which we would rather avoid at this late
2201 // state. Also using the tasks' local queues removes the potential
2202 // of the workers interfering with each other that could occur if
2203 // operating on the global stack.
2204 
2205 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2206   ConcurrentMark* _cm;
2207   CMTask*         _task;
2208   int             _ref_counter_limit;
2209   int             _ref_counter;
2210   bool            _is_serial;
2211  public:
2212   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2213     _cm(cm), _task(task), _is_serial(is_serial),
2214     _ref_counter_limit(G1RefProcDrainInterval) {
2215     assert(_ref_counter_limit > 0, "sanity");
2216     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2217     _ref_counter = _ref_counter_limit;
2218   }
2219 
2220   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2221   virtual void do_oop(      oop* p) { do_oop_work(p); }
2222 
2223   template <class T> void do_oop_work(T* p) {
2224     if (!_cm->has_overflown()) {
2225       oop obj = oopDesc::load_decode_heap_oop(p);
2226       if (_cm->verbose_high()) {
2227         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2228                                "*"PTR_FORMAT" = "PTR_FORMAT,
2229                                _task->worker_id(), p2i(p), p2i((void*) obj));
2230       }
2231 
2232       _task->deal_with_reference(obj);
2233       _ref_counter--;
2234 
2235       if (_ref_counter == 0) {
2236         // We have dealt with _ref_counter_limit references, pushing them
2237         // and objects reachable from them on to the local stack (and
2238         // possibly the global stack). Call CMTask::do_marking_step() to
2239         // process these entries.
2240         //
2241         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2242         // there's nothing more to do (i.e. we're done with the entries that
2243         // were pushed as a result of the CMTask::deal_with_reference() calls
2244         // above) or we overflow.
2245         //
2246         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2247         // flag while there may still be some work to do. (See the comment at
2248         // the beginning of CMTask::do_marking_step() for those conditions -
2249         // one of which is reaching the specified time target.) It is only
2250         // when CMTask::do_marking_step() returns without setting the
2251         // has_aborted() flag that the marking step has completed.
2252         do {
2253           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2254           _task->do_marking_step(mark_step_duration_ms,
2255                                  false      /* do_termination */,
2256                                  _is_serial);
2257         } while (_task->has_aborted() && !_cm->has_overflown());
2258         _ref_counter = _ref_counter_limit;
2259       }
2260     } else {
2261       if (_cm->verbose_high()) {
2262          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2263       }
2264     }
2265   }
2266 };
2267 
2268 // 'Drain' oop closure used by both serial and parallel reference processing.
2269 // Uses the CMTask associated with a given worker thread (for serial
2270 // reference processing the CMtask for worker 0 is used). Calls the
2271 // do_marking_step routine, with an unbelievably large timeout value,
2272 // to drain the marking data structures of the remaining entries
2273 // added by the 'keep alive' oop closure above.
2274 
2275 class G1CMDrainMarkingStackClosure: public VoidClosure {
2276   ConcurrentMark* _cm;
2277   CMTask*         _task;
2278   bool            _is_serial;
2279  public:
2280   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2281     _cm(cm), _task(task), _is_serial(is_serial) {
2282     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2283   }
2284 
2285   void do_void() {
2286     do {
2287       if (_cm->verbose_high()) {
2288         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2289                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2290       }
2291 
2292       // We call CMTask::do_marking_step() to completely drain the local
2293       // and global marking stacks of entries pushed by the 'keep alive'
2294       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2295       //
2296       // CMTask::do_marking_step() is called in a loop, which we'll exit
2297       // if there's nothing more to do (i.e. we've completely drained the
2298       // entries that were pushed as a a result of applying the 'keep alive'
2299       // closure to the entries on the discovered ref lists) or we overflow
2300       // the global marking stack.
2301       //
2302       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2303       // flag while there may still be some work to do. (See the comment at
2304       // the beginning of CMTask::do_marking_step() for those conditions -
2305       // one of which is reaching the specified time target.) It is only
2306       // when CMTask::do_marking_step() returns without setting the
2307       // has_aborted() flag that the marking step has completed.
2308 
2309       _task->do_marking_step(1000000000.0 /* something very large */,
2310                              true         /* do_termination */,
2311                              _is_serial);
2312     } while (_task->has_aborted() && !_cm->has_overflown());
2313   }
2314 };
2315 
2316 // Implementation of AbstractRefProcTaskExecutor for parallel
2317 // reference processing at the end of G1 concurrent marking
2318 
2319 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2320 private:
2321   G1CollectedHeap* _g1h;
2322   ConcurrentMark*  _cm;
2323   WorkGang*        _workers;
2324   int              _active_workers;
2325 
2326 public:
2327   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2328                         ConcurrentMark* cm,
2329                         WorkGang* workers,
2330                         int n_workers) :
2331     _g1h(g1h), _cm(cm),
2332     _workers(workers), _active_workers(n_workers) { }
2333 
2334   // Executes the given task using concurrent marking worker threads.
2335   virtual void execute(ProcessTask& task);
2336   virtual void execute(EnqueueTask& task);
2337 };
2338 
2339 class G1CMRefProcTaskProxy: public AbstractGangTask {
2340   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2341   ProcessTask&     _proc_task;
2342   G1CollectedHeap* _g1h;
2343   ConcurrentMark*  _cm;
2344 
2345 public:
2346   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2347                      G1CollectedHeap* g1h,
2348                      ConcurrentMark* cm) :
2349     AbstractGangTask("Process reference objects in parallel"),
2350     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2351     ReferenceProcessor* rp = _g1h->ref_processor_cm();
2352     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2353   }
2354 
2355   virtual void work(uint worker_id) {
2356     ResourceMark rm;
2357     HandleMark hm;
2358     CMTask* task = _cm->task(worker_id);
2359     G1CMIsAliveClosure g1_is_alive(_g1h);
2360     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2361     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2362 
2363     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2364   }
2365 };
2366 
2367 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2368   assert(_workers != NULL, "Need parallel worker threads.");
2369   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2370 
2371   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2372 
2373   // We need to reset the concurrency level before each
2374   // proxy task execution, so that the termination protocol
2375   // and overflow handling in CMTask::do_marking_step() knows
2376   // how many workers to wait for.
2377   _cm->set_concurrency(_active_workers);
2378   _g1h->set_par_threads(_active_workers);
2379   _workers->run_task(&proc_task_proxy);
2380   _g1h->set_par_threads(0);
2381 }
2382 
2383 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2384   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2385   EnqueueTask& _enq_task;
2386 
2387 public:
2388   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2389     AbstractGangTask("Enqueue reference objects in parallel"),
2390     _enq_task(enq_task) { }
2391 
2392   virtual void work(uint worker_id) {
2393     _enq_task.work(worker_id);
2394   }
2395 };
2396 
2397 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2398   assert(_workers != NULL, "Need parallel worker threads.");
2399   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2400 
2401   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2402 
2403   // Not strictly necessary but...
2404   //
2405   // We need to reset the concurrency level before each
2406   // proxy task execution, so that the termination protocol
2407   // and overflow handling in CMTask::do_marking_step() knows
2408   // how many workers to wait for.
2409   _cm->set_concurrency(_active_workers);
2410   _g1h->set_par_threads(_active_workers);
2411   _workers->run_task(&enq_task_proxy);
2412   _g1h->set_par_threads(0);
2413 }
2414 
2415 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2416   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2417 }
2418 
2419 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2420   if (has_overflown()) {
2421     // Skip processing the discovered references if we have
2422     // overflown the global marking stack. Reference objects
2423     // only get discovered once so it is OK to not
2424     // de-populate the discovered reference lists. We could have,
2425     // but the only benefit would be that, when marking restarts,
2426     // less reference objects are discovered.
2427     return;
2428   }
2429 
2430   ResourceMark rm;
2431   HandleMark   hm;
2432 
2433   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2434 
2435   // Is alive closure.
2436   G1CMIsAliveClosure g1_is_alive(g1h);
2437 
2438   // Inner scope to exclude the cleaning of the string and symbol
2439   // tables from the displayed time.
2440   {
2441     G1CMTraceTime t("GC ref-proc", G1Log::finer());
2442 
2443     ReferenceProcessor* rp = g1h->ref_processor_cm();
2444 
2445     // See the comment in G1CollectedHeap::ref_processing_init()
2446     // about how reference processing currently works in G1.
2447 
2448     // Set the soft reference policy
2449     rp->setup_policy(clear_all_soft_refs);
2450     assert(_markStack.isEmpty(), "mark stack should be empty");
2451 
2452     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2453     // in serial reference processing. Note these closures are also
2454     // used for serially processing (by the the current thread) the
2455     // JNI references during parallel reference processing.
2456     //
2457     // These closures do not need to synchronize with the worker
2458     // threads involved in parallel reference processing as these
2459     // instances are executed serially by the current thread (e.g.
2460     // reference processing is not multi-threaded and is thus
2461     // performed by the current thread instead of a gang worker).
2462     //
2463     // The gang tasks involved in parallel reference processing create
2464     // their own instances of these closures, which do their own
2465     // synchronization among themselves.
2466     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2467     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2468 
2469     // We need at least one active thread. If reference processing
2470     // is not multi-threaded we use the current (VMThread) thread,
2471     // otherwise we use the work gang from the G1CollectedHeap and
2472     // we utilize all the worker threads we can.
2473     bool processing_is_mt = rp->processing_is_mt();
2474     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2475     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2476 
2477     // Parallel processing task executor.
2478     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2479                                               g1h->workers(), active_workers);
2480     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2481 
2482     // Set the concurrency level. The phase was already set prior to
2483     // executing the remark task.
2484     set_concurrency(active_workers);
2485 
2486     // Set the degree of MT processing here.  If the discovery was done MT,
2487     // the number of threads involved during discovery could differ from
2488     // the number of active workers.  This is OK as long as the discovered
2489     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2490     rp->set_active_mt_degree(active_workers);
2491 
2492     // Process the weak references.
2493     const ReferenceProcessorStats& stats =
2494         rp->process_discovered_references(&g1_is_alive,
2495                                           &g1_keep_alive,
2496                                           &g1_drain_mark_stack,
2497                                           executor,
2498                                           g1h->gc_timer_cm(),
2499                                           concurrent_gc_id());
2500     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2501 
2502     // The do_oop work routines of the keep_alive and drain_marking_stack
2503     // oop closures will set the has_overflown flag if we overflow the
2504     // global marking stack.
2505 
2506     assert(_markStack.overflow() || _markStack.isEmpty(),
2507             "mark stack should be empty (unless it overflowed)");
2508 
2509     if (_markStack.overflow()) {
2510       // This should have been done already when we tried to push an
2511       // entry on to the global mark stack. But let's do it again.
2512       set_has_overflown();
2513     }
2514 
2515     assert(rp->num_q() == active_workers, "why not");
2516 
2517     rp->enqueue_discovered_references(executor);
2518 
2519     rp->verify_no_references_recorded();
2520     assert(!rp->discovery_enabled(), "Post condition");
2521   }
2522 
2523   if (has_overflown()) {
2524     // We can not trust g1_is_alive if the marking stack overflowed
2525     return;
2526   }
2527 
2528   assert(_markStack.isEmpty(), "Marking should have completed");
2529 
2530   // Unload Klasses, String, Symbols, Code Cache, etc.
2531   {
2532     G1CMTraceTime trace("Unloading", G1Log::finer());
2533 
2534     if (ClassUnloadingWithConcurrentMark) {
2535       // Cleaning of klasses depends on correct information from MetadataMarkOnStack. The CodeCache::mark_on_stack
2536       // part is too slow to be done serially, so it is handled during the weakRefsWorkParallelPart phase.
2537       // Defer the cleaning until we have complete on_stack data.
2538       MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);
2539 
2540       bool purged_classes;
2541 
2542       {
2543         G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
2544         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2545       }
2546 
2547       {
2548         G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
2549         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2550       }
2551 
2552       {
2553         G1CMTraceTime trace("Deallocate Metadata", G1Log::finest());
2554         ClassLoaderDataGraph::free_deallocate_lists();
2555       }
2556     }
2557 
2558     if (G1StringDedup::is_enabled()) {
2559       G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
2560       G1StringDedup::unlink(&g1_is_alive);
2561     }
2562   }
2563 }
2564 
2565 void ConcurrentMark::swapMarkBitMaps() {
2566   CMBitMapRO* temp = _prevMarkBitMap;
2567   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2568   _nextMarkBitMap  = (CMBitMap*)  temp;
2569 }
2570 
2571 class CMObjectClosure;
2572 
2573 // Closure for iterating over objects, currently only used for
2574 // processing SATB buffers.
2575 class CMObjectClosure : public ObjectClosure {
2576 private:
2577   CMTask* _task;
2578 
2579 public:
2580   void do_object(oop obj) {
2581     _task->deal_with_reference(obj);
2582   }
2583 
2584   CMObjectClosure(CMTask* task) : _task(task) { }
2585 };
2586 
2587 class G1RemarkThreadsClosure : public ThreadClosure {
2588   CMObjectClosure _cm_obj;
2589   G1CMOopClosure _cm_cl;
2590   MarkingCodeBlobClosure _code_cl;
2591   int _thread_parity;
2592 
2593  public:
2594   G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
2595     _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2596     _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}
2597 
2598   void do_thread(Thread* thread) {
2599     if (thread->is_Java_thread()) {
2600       if (thread->claim_oops_do(true, _thread_parity)) {
2601         JavaThread* jt = (JavaThread*)thread;
2602 
2603         // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking
2604         // however the liveness of oops reachable from nmethods have very complex lifecycles:
2605         // * Alive if on the stack of an executing method
2606         // * Weakly reachable otherwise
2607         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
2608         // live by the SATB invariant but other oops recorded in nmethods may behave differently.
2609         jt->nmethods_do(&_code_cl);
2610 
2611         jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
2612       }
2613     } else if (thread->is_VM_thread()) {
2614       if (thread->claim_oops_do(true, _thread_parity)) {
2615         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
2616       }
2617     }
2618   }
2619 };
2620 
2621 class CMRemarkTask: public AbstractGangTask {
2622 private:
2623   ConcurrentMark* _cm;
2624 public:
2625   void work(uint worker_id) {
2626     // Since all available tasks are actually started, we should
2627     // only proceed if we're supposed to be active.
2628     if (worker_id < _cm->active_tasks()) {
2629       CMTask* task = _cm->task(worker_id);
2630       task->record_start_time();
2631       {
2632         ResourceMark rm;
2633         HandleMark hm;
2634 
2635         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2636         Threads::threads_do(&threads_f);
2637       }
2638 
2639       do {
2640         task->do_marking_step(1000000000.0 /* something very large */,
2641                               true         /* do_termination       */,
2642                               false        /* is_serial            */);
2643       } while (task->has_aborted() && !_cm->has_overflown());
2644       // If we overflow, then we do not want to restart. We instead
2645       // want to abort remark and do concurrent marking again.
2646       task->record_end_time();
2647     }
2648   }
2649 
2650   CMRemarkTask(ConcurrentMark* cm, int active_workers) :
2651     AbstractGangTask("Par Remark"), _cm(cm) {
2652     _cm->terminator()->reset_for_reuse(active_workers);
2653   }
2654 };
2655 
2656 void ConcurrentMark::checkpointRootsFinalWork() {
2657   ResourceMark rm;
2658   HandleMark   hm;
2659   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2660 
2661   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2662 
2663   g1h->ensure_parsability(false);
2664 
2665   G1CollectedHeap::StrongRootsScope srs(g1h);
2666   // this is remark, so we'll use up all active threads
2667   uint active_workers = g1h->workers()->active_workers();
2668   if (active_workers == 0) {
2669     assert(active_workers > 0, "Should have been set earlier");
2670     active_workers = (uint) ParallelGCThreads;
2671     g1h->workers()->set_active_workers(active_workers);
2672   }
2673   set_concurrency_and_phase(active_workers, false /* concurrent */);
2674   // Leave _parallel_marking_threads at it's
2675   // value originally calculated in the ConcurrentMark
2676   // constructor and pass values of the active workers
2677   // through the gang in the task.
2678 
2679   CMRemarkTask remarkTask(this, active_workers);
2680   // We will start all available threads, even if we decide that the
2681   // active_workers will be fewer. The extra ones will just bail out
2682   // immediately.
2683   g1h->set_par_threads(active_workers);
2684   g1h->workers()->run_task(&remarkTask);
2685   g1h->set_par_threads(0);
2686 
2687   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2688   guarantee(has_overflown() ||
2689             satb_mq_set.completed_buffers_num() == 0,
2690             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2691                     BOOL_TO_STR(has_overflown()),
2692                     satb_mq_set.completed_buffers_num()));
2693 
2694   print_stats();
2695 }
2696 
2697 #ifndef PRODUCT
2698 
2699 class PrintReachableOopClosure: public OopClosure {
2700 private:
2701   G1CollectedHeap* _g1h;
2702   outputStream*    _out;
2703   VerifyOption     _vo;
2704   bool             _all;
2705 
2706 public:
2707   PrintReachableOopClosure(outputStream* out,
2708                            VerifyOption  vo,
2709                            bool          all) :
2710     _g1h(G1CollectedHeap::heap()),
2711     _out(out), _vo(vo), _all(all) { }
2712 
2713   void do_oop(narrowOop* p) { do_oop_work(p); }
2714   void do_oop(      oop* p) { do_oop_work(p); }
2715 
2716   template <class T> void do_oop_work(T* p) {
2717     oop         obj = oopDesc::load_decode_heap_oop(p);
2718     const char* str = NULL;
2719     const char* str2 = "";
2720 
2721     if (obj == NULL) {
2722       str = "";
2723     } else if (!_g1h->is_in_g1_reserved(obj)) {
2724       str = " O";
2725     } else {
2726       HeapRegion* hr  = _g1h->heap_region_containing(obj);
2727       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2728       bool marked = _g1h->is_marked(obj, _vo);
2729 
2730       if (over_tams) {
2731         str = " >";
2732         if (marked) {
2733           str2 = " AND MARKED";
2734         }
2735       } else if (marked) {
2736         str = " M";
2737       } else {
2738         str = " NOT";
2739       }
2740     }
2741 
2742     _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
2743                    p2i(p), p2i((void*) obj), str, str2);
2744   }
2745 };
2746 
2747 class PrintReachableObjectClosure : public ObjectClosure {
2748 private:
2749   G1CollectedHeap* _g1h;
2750   outputStream*    _out;
2751   VerifyOption     _vo;
2752   bool             _all;
2753   HeapRegion*      _hr;
2754 
2755 public:
2756   PrintReachableObjectClosure(outputStream* out,
2757                               VerifyOption  vo,
2758                               bool          all,
2759                               HeapRegion*   hr) :
2760     _g1h(G1CollectedHeap::heap()),
2761     _out(out), _vo(vo), _all(all), _hr(hr) { }
2762 
2763   void do_object(oop o) {
2764     bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2765     bool marked = _g1h->is_marked(o, _vo);
2766     bool print_it = _all || over_tams || marked;
2767 
2768     if (print_it) {
2769       _out->print_cr(" "PTR_FORMAT"%s",
2770                      p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : "");
2771       PrintReachableOopClosure oopCl(_out, _vo, _all);
2772       o->oop_iterate_no_header(&oopCl);
2773     }
2774   }
2775 };
2776 
2777 class PrintReachableRegionClosure : public HeapRegionClosure {
2778 private:
2779   G1CollectedHeap* _g1h;
2780   outputStream*    _out;
2781   VerifyOption     _vo;
2782   bool             _all;
2783 
2784 public:
2785   bool doHeapRegion(HeapRegion* hr) {
2786     HeapWord* b = hr->bottom();
2787     HeapWord* e = hr->end();
2788     HeapWord* t = hr->top();
2789     HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2790     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2791                    "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p));
2792     _out->cr();
2793 
2794     HeapWord* from = b;
2795     HeapWord* to   = t;
2796 
2797     if (to > from) {
2798       _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to));
2799       _out->cr();
2800       PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2801       hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2802       _out->cr();
2803     }
2804 
2805     return false;
2806   }
2807 
2808   PrintReachableRegionClosure(outputStream* out,
2809                               VerifyOption  vo,
2810                               bool          all) :
2811     _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2812 };
2813 
2814 void ConcurrentMark::print_reachable(const char* str,
2815                                      VerifyOption vo,
2816                                      bool all) {
2817   gclog_or_tty->cr();
2818   gclog_or_tty->print_cr("== Doing heap dump... ");
2819 
2820   if (G1PrintReachableBaseFile == NULL) {
2821     gclog_or_tty->print_cr("  #### error: no base file defined");
2822     return;
2823   }
2824 
2825   if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2826       (JVM_MAXPATHLEN - 1)) {
2827     gclog_or_tty->print_cr("  #### error: file name too long");
2828     return;
2829   }
2830 
2831   char file_name[JVM_MAXPATHLEN];
2832   sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2833   gclog_or_tty->print_cr("  dumping to file %s", file_name);
2834 
2835   fileStream fout(file_name);
2836   if (!fout.is_open()) {
2837     gclog_or_tty->print_cr("  #### error: could not open file");
2838     return;
2839   }
2840 
2841   outputStream* out = &fout;
2842   out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2843   out->cr();
2844 
2845   out->print_cr("--- ITERATING OVER REGIONS");
2846   out->cr();
2847   PrintReachableRegionClosure rcl(out, vo, all);
2848   _g1h->heap_region_iterate(&rcl);
2849   out->cr();
2850 
2851   gclog_or_tty->print_cr("  done");
2852   gclog_or_tty->flush();
2853 }
2854 
2855 #endif // PRODUCT
2856 
2857 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2858   // Note we are overriding the read-only view of the prev map here, via
2859   // the cast.
2860   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2861 }
2862 
2863 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2864   _nextMarkBitMap->clearRange(mr);
2865 }
2866 
2867 HeapRegion*
2868 ConcurrentMark::claim_region(uint worker_id) {
2869   // "checkpoint" the finger
2870   HeapWord* finger = _finger;
2871 
2872   // _heap_end will not change underneath our feet; it only changes at
2873   // yield points.
2874   while (finger < _heap_end) {
2875     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2876 
2877     // Note on how this code handles humongous regions. In the
2878     // normal case the finger will reach the start of a "starts
2879     // humongous" (SH) region. Its end will either be the end of the
2880     // last "continues humongous" (CH) region in the sequence, or the
2881     // standard end of the SH region (if the SH is the only region in
2882     // the sequence). That way claim_region() will skip over the CH
2883     // regions. However, there is a subtle race between a CM thread
2884     // executing this method and a mutator thread doing a humongous
2885     // object allocation. The two are not mutually exclusive as the CM
2886     // thread does not need to hold the Heap_lock when it gets
2887     // here. So there is a chance that claim_region() will come across
2888     // a free region that's in the progress of becoming a SH or a CH
2889     // region. In the former case, it will either
2890     //   a) Miss the update to the region's end, in which case it will
2891     //      visit every subsequent CH region, will find their bitmaps
2892     //      empty, and do nothing, or
2893     //   b) Will observe the update of the region's end (in which case
2894     //      it will skip the subsequent CH regions).
2895     // If it comes across a region that suddenly becomes CH, the
2896     // scenario will be similar to b). So, the race between
2897     // claim_region() and a humongous object allocation might force us
2898     // to do a bit of unnecessary work (due to some unnecessary bitmap
2899     // iterations) but it should not introduce and correctness issues.
2900     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2901 
2902     // Above heap_region_containing_raw may return NULL as we always scan claim
2903     // until the end of the heap. In this case, just jump to the next region.
2904     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2905 
2906     // Is the gap between reading the finger and doing the CAS too long?
2907     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2908     if (res == finger && curr_region != NULL) {
2909       // we succeeded
2910       HeapWord*   bottom        = curr_region->bottom();
2911       HeapWord*   limit         = curr_region->next_top_at_mark_start();
2912 
2913       if (verbose_low()) {
2914         gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2915                                "["PTR_FORMAT", "PTR_FORMAT"), "
2916                                "limit = "PTR_FORMAT,
2917                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2918       }
2919 
2920       // notice that _finger == end cannot be guaranteed here since,
2921       // someone else might have moved the finger even further
2922       assert(_finger >= end, "the finger should have moved forward");
2923 
2924       if (verbose_low()) {
2925         gclog_or_tty->print_cr("[%u] we were successful with region = "
2926                                PTR_FORMAT, worker_id, p2i(curr_region));
2927       }
2928 
2929       if (limit > bottom) {
2930         if (verbose_low()) {
2931           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2932                                  "returning it ", worker_id, p2i(curr_region));
2933         }
2934         return curr_region;
2935       } else {
2936         assert(limit == bottom,
2937                "the region limit should be at bottom");
2938         if (verbose_low()) {
2939           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2940                                  "returning NULL", worker_id, p2i(curr_region));
2941         }
2942         // we return NULL and the caller should try calling
2943         // claim_region() again.
2944         return NULL;
2945       }
2946     } else {
2947       assert(_finger > finger, "the finger should have moved forward");
2948       if (verbose_low()) {
2949         if (curr_region == NULL) {
2950           gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
2951                                  "global finger = "PTR_FORMAT", "
2952                                  "our finger = "PTR_FORMAT,
2953                                  worker_id, p2i(_finger), p2i(finger));
2954         } else {
2955           gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2956                                  "global finger = "PTR_FORMAT", "
2957                                  "our finger = "PTR_FORMAT,
2958                                  worker_id, p2i(_finger), p2i(finger));
2959         }
2960       }
2961 
2962       // read it again
2963       finger = _finger;
2964     }
2965   }
2966 
2967   return NULL;
2968 }
2969 
2970 #ifndef PRODUCT
2971 enum VerifyNoCSetOopsPhase {
2972   VerifyNoCSetOopsStack,
2973   VerifyNoCSetOopsQueues,
2974   VerifyNoCSetOopsSATBCompleted,
2975   VerifyNoCSetOopsSATBThread
2976 };
2977 
2978 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
2979 private:
2980   G1CollectedHeap* _g1h;
2981   ConcurrentMark* _cm;
2982   VerifyNoCSetOopsPhase _phase;
2983   int _info;
2984 
2985   const char* phase_str() {
2986     switch (_phase) {
2987     case VerifyNoCSetOopsStack:         return "Stack";
2988     case VerifyNoCSetOopsQueues:        return "Queue";
2989     case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
2990     case VerifyNoCSetOopsSATBThread:    return "Thread SATB Buffers";
2991     default:                            ShouldNotReachHere();
2992     }
2993     return NULL;
2994   }
2995 
2996   void do_object_work(oop obj) {
2997     switch (_phase) {
2998     case VerifyNoCSetOopsStack:
2999     case VerifyNoCSetOopsQueues:
3000       // Ignore reclaimed humongous object entries in mark stack and
3001       // thread queues.
3002       if (_cm->is_stale_humongous_marked_entry(obj)) break;
3003     default:
3004       guarantee(!_g1h->obj_in_cs(obj),
3005                 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
3006                         p2i((void*) obj), phase_str(), _info));
3007     }
3008   }
3009 
3010 public:
3011   VerifyNoCSetOopsClosure(G1CollectedHeap* g1h, ConcurrentMark* cm)
3012     : _g1h(g1h), _cm(cm) { }
3013 
3014   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
3015     _phase = phase;
3016     _info = info;
3017   }
3018 
3019   virtual void do_oop(oop* p) {
3020     oop obj = oopDesc::load_decode_heap_oop(p);
3021     do_object_work(obj);
3022   }
3023 
3024   virtual void do_oop(narrowOop* p) {
3025     // We should not come across narrow oops while scanning marking
3026     // stacks and SATB buffers.
3027     ShouldNotReachHere();
3028   }
3029 
3030   virtual void do_object(oop obj) {
3031     do_object_work(obj);
3032   }
3033 };
3034 
3035 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
3036                                          bool verify_enqueued_buffers,
3037                                          bool verify_thread_buffers,
3038                                          bool verify_fingers) {
3039   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
3040   if (!G1CollectedHeap::heap()->mark_in_progress()) {
3041     return;
3042   }
3043 
3044   VerifyNoCSetOopsClosure cl(_g1h, this);
3045 
3046   if (verify_stacks) {
3047     // Verify entries on the global mark stack
3048     cl.set_phase(VerifyNoCSetOopsStack);
3049     _markStack.oops_do(&cl);
3050 
3051     // Verify entries on the task queues
3052     for (uint i = 0; i < _max_worker_id; i += 1) {
3053       cl.set_phase(VerifyNoCSetOopsQueues, i);
3054       CMTaskQueue* queue = _task_queues->queue(i);
3055       queue->oops_do(&cl);
3056     }
3057   }
3058 
3059   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
3060 
3061   // Verify entries on the enqueued SATB buffers
3062   if (verify_enqueued_buffers) {
3063     cl.set_phase(VerifyNoCSetOopsSATBCompleted);
3064     satb_qs.iterate_completed_buffers_read_only(&cl);
3065   }
3066 
3067   // Verify entries on the per-thread SATB buffers
3068   if (verify_thread_buffers) {
3069     cl.set_phase(VerifyNoCSetOopsSATBThread);
3070     satb_qs.iterate_thread_buffers_read_only(&cl);
3071   }
3072 
3073   if (verify_fingers) {
3074     // Verify the global finger
3075     HeapWord* global_finger = finger();
3076     if (global_finger != NULL && global_finger < _heap_end) {
3077       // The global finger always points to a heap region boundary. We
3078       // use heap_region_containing_raw() to get the containing region
3079       // given that the global finger could be pointing to a free region
3080       // which subsequently becomes continues humongous. If that
3081       // happens, heap_region_containing() will return the bottom of the
3082       // corresponding starts humongous region and the check below will
3083       // not hold any more.
3084       // Since we always iterate over all regions, we might get a NULL HeapRegion
3085       // here.
3086       HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
3087       guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
3088                 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
3089                         p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
3090     }
3091 
3092     // Verify the task fingers
3093     assert(parallel_marking_threads() <= _max_worker_id, "sanity");
3094     for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
3095       CMTask* task = _tasks[i];
3096       HeapWord* task_finger = task->finger();
3097       if (task_finger != NULL && task_finger < _heap_end) {
3098         // See above note on the global finger verification.
3099         HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3100         guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
3101                   !task_hr->in_collection_set(),
3102                   err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
3103                           p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
3104       }
3105     }
3106   }
3107 }
3108 #endif // PRODUCT
3109 
3110 // Aggregate the counting data that was constructed concurrently
3111 // with marking.
3112 class AggregateCountDataHRClosure: public HeapRegionClosure {
3113   G1CollectedHeap* _g1h;
3114   ConcurrentMark* _cm;
3115   CardTableModRefBS* _ct_bs;
3116   BitMap* _cm_card_bm;
3117   uint _max_worker_id;
3118 
3119  public:
3120   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3121                               BitMap* cm_card_bm,
3122                               uint max_worker_id) :
3123     _g1h(g1h), _cm(g1h->concurrent_mark()),
3124     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
3125     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3126 
3127   bool doHeapRegion(HeapRegion* hr) {
3128     if (hr->is_continues_humongous()) {
3129       // We will ignore these here and process them when their
3130       // associated "starts humongous" region is processed.
3131       // Note that we cannot rely on their associated
3132       // "starts humongous" region to have their bit set to 1
3133       // since, due to the region chunking in the parallel region
3134       // iteration, a "continues humongous" region might be visited
3135       // before its associated "starts humongous".
3136       return false;
3137     }
3138 
3139     HeapWord* start = hr->bottom();
3140     HeapWord* limit = hr->next_top_at_mark_start();
3141     HeapWord* end = hr->end();
3142 
3143     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3144            err_msg("Preconditions not met - "
3145                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3146                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
3147                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
3148 
3149     assert(hr->next_marked_bytes() == 0, "Precondition");
3150 
3151     if (start == limit) {
3152       // NTAMS of this region has not been set so nothing to do.
3153       return false;
3154     }
3155 
3156     // 'start' should be in the heap.
3157     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3158     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3159     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3160 
3161     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3162     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3163     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3164 
3165     // If ntams is not card aligned then we bump card bitmap index
3166     // for limit so that we get the all the cards spanned by
3167     // the object ending at ntams.
3168     // Note: if this is the last region in the heap then ntams
3169     // could be actually just beyond the end of the the heap;
3170     // limit_idx will then  correspond to a (non-existent) card
3171     // that is also outside the heap.
3172     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3173       limit_idx += 1;
3174     }
3175 
3176     assert(limit_idx <= end_idx, "or else use atomics");
3177 
3178     // Aggregate the "stripe" in the count data associated with hr.
3179     uint hrm_index = hr->hrm_index();
3180     size_t marked_bytes = 0;
3181 
3182     for (uint i = 0; i < _max_worker_id; i += 1) {
3183       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3184       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3185 
3186       // Fetch the marked_bytes in this region for task i and
3187       // add it to the running total for this region.
3188       marked_bytes += marked_bytes_array[hrm_index];
3189 
3190       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3191       // into the global card bitmap.
3192       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3193 
3194       while (scan_idx < limit_idx) {
3195         assert(task_card_bm->at(scan_idx) == true, "should be");
3196         _cm_card_bm->set_bit(scan_idx);
3197         assert(_cm_card_bm->at(scan_idx) == true, "should be");
3198 
3199         // BitMap::get_next_one_offset() can handle the case when
3200         // its left_offset parameter is greater than its right_offset
3201         // parameter. It does, however, have an early exit if
3202         // left_offset == right_offset. So let's limit the value
3203         // passed in for left offset here.
3204         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3205         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3206       }
3207     }
3208 
3209     // Update the marked bytes for this region.
3210     hr->add_to_marked_bytes(marked_bytes);
3211 
3212     // Next heap region
3213     return false;
3214   }
3215 };
3216 
3217 class G1AggregateCountDataTask: public AbstractGangTask {
3218 protected:
3219   G1CollectedHeap* _g1h;
3220   ConcurrentMark* _cm;
3221   BitMap* _cm_card_bm;
3222   uint _max_worker_id;
3223   int _active_workers;
3224   HeapRegionClaimer _hrclaimer;
3225 
3226 public:
3227   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3228                            ConcurrentMark* cm,
3229                            BitMap* cm_card_bm,
3230                            uint max_worker_id,
3231                            int n_workers) :
3232       AbstractGangTask("Count Aggregation"),
3233       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3234       _max_worker_id(max_worker_id),
3235       _active_workers(n_workers),
3236       _hrclaimer(_active_workers) {
3237   }
3238 
3239   void work(uint worker_id) {
3240     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3241 
3242     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
3243   }
3244 };
3245 
3246 
3247 void ConcurrentMark::aggregate_count_data() {
3248   int n_workers = _g1h->workers()->active_workers();
3249 
3250   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3251                                            _max_worker_id, n_workers);
3252 
3253   _g1h->set_par_threads(n_workers);
3254   _g1h->workers()->run_task(&g1_par_agg_task);
3255   _g1h->set_par_threads(0);
3256 }
3257 
3258 // Clear the per-worker arrays used to store the per-region counting data
3259 void ConcurrentMark::clear_all_count_data() {
3260   // Clear the global card bitmap - it will be filled during
3261   // liveness count aggregation (during remark) and the
3262   // final counting task.
3263   _card_bm.clear();
3264 
3265   // Clear the global region bitmap - it will be filled as part
3266   // of the final counting task.
3267   _region_bm.clear();
3268 
3269   uint max_regions = _g1h->max_regions();
3270   assert(_max_worker_id > 0, "uninitialized");
3271 
3272   for (uint i = 0; i < _max_worker_id; i += 1) {
3273     BitMap* task_card_bm = count_card_bitmap_for(i);
3274     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3275 
3276     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3277     assert(marked_bytes_array != NULL, "uninitialized");
3278 
3279     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3280     task_card_bm->clear();
3281   }
3282 }
3283 
3284 void ConcurrentMark::print_stats() {
3285   if (verbose_stats()) {
3286     gclog_or_tty->print_cr("---------------------------------------------------------------------");
3287     for (size_t i = 0; i < _active_tasks; ++i) {
3288       _tasks[i]->print_stats();
3289       gclog_or_tty->print_cr("---------------------------------------------------------------------");
3290     }
3291   }
3292 }
3293 
3294 // abandon current marking iteration due to a Full GC
3295 void ConcurrentMark::abort() {
3296   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
3297   // concurrent bitmap clearing.
3298   _nextMarkBitMap->clearAll();
3299 
3300   // Note we cannot clear the previous marking bitmap here
3301   // since VerifyDuringGC verifies the objects marked during
3302   // a full GC against the previous bitmap.
3303 
3304   // Clear the liveness counting data
3305   clear_all_count_data();
3306   // Empty mark stack
3307   reset_marking_state();
3308   for (uint i = 0; i < _max_worker_id; ++i) {
3309     _tasks[i]->clear_region_fields();
3310   }
3311   _first_overflow_barrier_sync.abort();
3312   _second_overflow_barrier_sync.abort();
3313   const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
3314   if (!gc_id.is_undefined()) {
3315     // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
3316     // to detect that it was aborted. Only keep track of the first GC id that we aborted.
3317     _aborted_gc_id = gc_id;
3318    }
3319   _has_aborted = true;
3320 
3321   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3322   satb_mq_set.abandon_partial_marking();
3323   // This can be called either during or outside marking, we'll read
3324   // the expected_active value from the SATB queue set.
3325   satb_mq_set.set_active_all_threads(
3326                                  false, /* new active value */
3327                                  satb_mq_set.is_active() /* expected_active */);
3328 
3329   _g1h->trace_heap_after_concurrent_cycle();
3330   _g1h->register_concurrent_cycle_end();
3331 }
3332 
3333 const GCId& ConcurrentMark::concurrent_gc_id() {
3334   if (has_aborted()) {
3335     return _aborted_gc_id;
3336   }
3337   return _g1h->gc_tracer_cm()->gc_id();
3338 }
3339 
3340 static void print_ms_time_info(const char* prefix, const char* name,
3341                                NumberSeq& ns) {
3342   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3343                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3344   if (ns.num() > 0) {
3345     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3346                            prefix, ns.sd(), ns.maximum());
3347   }
3348 }
3349 
3350 void ConcurrentMark::print_summary_info() {
3351   gclog_or_tty->print_cr(" Concurrent marking:");
3352   print_ms_time_info("  ", "init marks", _init_times);
3353   print_ms_time_info("  ", "remarks", _remark_times);
3354   {
3355     print_ms_time_info("     ", "final marks", _remark_mark_times);
3356     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3357 
3358   }
3359   print_ms_time_info("  ", "cleanups", _cleanup_times);
3360   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
3361                          _total_counting_time,
3362                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3363                           (double)_cleanup_times.num()
3364                          : 0.0));
3365   if (G1ScrubRemSets) {
3366     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3367                            _total_rs_scrub_time,
3368                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3369                             (double)_cleanup_times.num()
3370                            : 0.0));
3371   }
3372   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3373                          (_init_times.sum() + _remark_times.sum() +
3374                           _cleanup_times.sum())/1000.0);
3375   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3376                 "(%8.2f s marking).",
3377                 cmThread()->vtime_accum(),
3378                 cmThread()->vtime_mark_accum());
3379 }
3380 
3381 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3382   _parallel_workers->print_worker_threads_on(st);
3383 }
3384 
3385 void ConcurrentMark::print_on_error(outputStream* st) const {
3386   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3387       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3388   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3389   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3390 }
3391 
3392 // We take a break if someone is trying to stop the world.
3393 bool ConcurrentMark::do_yield_check(uint worker_id) {
3394   if (SuspendibleThreadSet::should_yield()) {
3395     if (worker_id == 0) {
3396       _g1h->g1_policy()->record_concurrent_pause();
3397     }
3398     SuspendibleThreadSet::yield();
3399     return true;
3400   } else {
3401     return false;
3402   }
3403 }
3404 
3405 #ifndef PRODUCT
3406 // for debugging purposes
3407 void ConcurrentMark::print_finger() {
3408   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3409                          p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3410   for (uint i = 0; i < _max_worker_id; ++i) {
3411     gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3412   }
3413   gclog_or_tty->cr();
3414 }
3415 #endif
3416 
3417 void CMTask::scan_object(oop obj) {
3418   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3419   assert(!_g1h->is_on_master_free_list(_g1h->heap_region_containing(obj)), "invariant");
3420 
3421   if (_cm->verbose_high()) {
3422     gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3423                            _worker_id, p2i((void*) obj));
3424   }
3425 
3426   size_t obj_size = obj->size();
3427   _words_scanned += obj_size;
3428 
3429   obj->oop_iterate(_cm_oop_closure);
3430   statsOnly( ++_objs_scanned );
3431   check_limits();
3432 }
3433 
3434 // Closure for iteration over bitmaps
3435 class CMBitMapClosure : public BitMapClosure {
3436 private:
3437   // the bitmap that is being iterated over
3438   CMBitMap*                   _nextMarkBitMap;
3439   ConcurrentMark*             _cm;
3440   CMTask*                     _task;
3441 
3442 public:
3443   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3444     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
3445 
3446   bool do_bit(size_t offset) {
3447     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3448     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3449     assert( addr < _cm->finger(), "invariant");
3450 
3451     statsOnly( _task->increase_objs_found_on_bitmap() );
3452     assert(addr >= _task->finger(), "invariant");
3453 
3454     // We move that task's local finger along.
3455     _task->move_finger_to(addr);
3456 
3457     _task->scan_object(oop(addr));
3458     // we only partially drain the local queue and global stack
3459     _task->drain_local_queue(true);
3460     _task->drain_global_stack(true);
3461 
3462     // if the has_aborted flag has been raised, we need to bail out of
3463     // the iteration
3464     return !_task->has_aborted();
3465   }
3466 };
3467 
3468 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3469                                ConcurrentMark* cm,
3470                                CMTask* task)
3471   : _g1h(g1h), _cm(cm), _task(task) {
3472   assert(_ref_processor == NULL, "should be initialized to NULL");
3473 
3474   if (G1UseConcMarkReferenceProcessing) {
3475     _ref_processor = g1h->ref_processor_cm();
3476     assert(_ref_processor != NULL, "should not be NULL");
3477   }
3478 }
3479 
3480 void CMTask::setup_for_region(HeapRegion* hr) {
3481   assert(hr != NULL,
3482         "claim_region() should have filtered out NULL regions");
3483   assert(!hr->is_continues_humongous(),
3484         "claim_region() should have filtered out continues humongous regions");
3485 
3486   if (_cm->verbose_low()) {
3487     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3488                            _worker_id, p2i(hr));
3489   }
3490 
3491   _curr_region  = hr;
3492   _finger       = hr->bottom();
3493   update_region_limit();
3494 }
3495 
3496 void CMTask::update_region_limit() {
3497   HeapRegion* hr            = _curr_region;
3498   HeapWord* bottom          = hr->bottom();
3499   HeapWord* limit           = hr->next_top_at_mark_start();
3500 
3501   if (limit == bottom) {
3502     if (_cm->verbose_low()) {
3503       gclog_or_tty->print_cr("[%u] found an empty region "
3504                              "["PTR_FORMAT", "PTR_FORMAT")",
3505                              _worker_id, p2i(bottom), p2i(limit));
3506     }
3507     // The region was collected underneath our feet.
3508     // We set the finger to bottom to ensure that the bitmap
3509     // iteration that will follow this will not do anything.
3510     // (this is not a condition that holds when we set the region up,
3511     // as the region is not supposed to be empty in the first place)
3512     _finger = bottom;
3513   } else if (limit >= _region_limit) {
3514     assert(limit >= _finger, "peace of mind");
3515   } else {
3516     assert(limit < _region_limit, "only way to get here");
3517     // This can happen under some pretty unusual circumstances.  An
3518     // evacuation pause empties the region underneath our feet (NTAMS
3519     // at bottom). We then do some allocation in the region (NTAMS
3520     // stays at bottom), followed by the region being used as a GC
3521     // alloc region (NTAMS will move to top() and the objects
3522     // originally below it will be grayed). All objects now marked in
3523     // the region are explicitly grayed, if below the global finger,
3524     // and we do not need in fact to scan anything else. So, we simply
3525     // set _finger to be limit to ensure that the bitmap iteration
3526     // doesn't do anything.
3527     _finger = limit;
3528   }
3529 
3530   _region_limit = limit;
3531 }
3532 
3533 void CMTask::giveup_current_region() {
3534   assert(_curr_region != NULL, "invariant");
3535   if (_cm->verbose_low()) {
3536     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3537                            _worker_id, p2i(_curr_region));
3538   }
3539   clear_region_fields();
3540 }
3541 
3542 void CMTask::clear_region_fields() {
3543   // Values for these three fields that indicate that we're not
3544   // holding on to a region.
3545   _curr_region   = NULL;
3546   _finger        = NULL;
3547   _region_limit  = NULL;
3548 }
3549 
3550 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3551   if (cm_oop_closure == NULL) {
3552     assert(_cm_oop_closure != NULL, "invariant");
3553   } else {
3554     assert(_cm_oop_closure == NULL, "invariant");
3555   }
3556   _cm_oop_closure = cm_oop_closure;
3557 }
3558 
3559 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3560   guarantee(nextMarkBitMap != NULL, "invariant");
3561 
3562   if (_cm->verbose_low()) {
3563     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3564   }
3565 
3566   _nextMarkBitMap                = nextMarkBitMap;
3567   clear_region_fields();
3568 
3569   _calls                         = 0;
3570   _elapsed_time_ms               = 0.0;
3571   _termination_time_ms           = 0.0;
3572   _termination_start_time_ms     = 0.0;
3573 
3574 #if _MARKING_STATS_
3575   _aborted                       = 0;
3576   _aborted_overflow              = 0;
3577   _aborted_cm_aborted            = 0;
3578   _aborted_yield                 = 0;
3579   _aborted_timed_out             = 0;
3580   _aborted_satb                  = 0;
3581   _aborted_termination           = 0;
3582   _steal_attempts                = 0;
3583   _steals                        = 0;
3584   _local_pushes                  = 0;
3585   _local_pops                    = 0;
3586   _local_max_size                = 0;
3587   _objs_scanned                  = 0;
3588   _stale_humongous_queue_entries = 0;
3589   _global_pushes                 = 0;
3590   _global_pops                   = 0;
3591   _global_max_size               = 0;
3592   _global_transfers_to           = 0;
3593   _global_transfers_from         = 0;
3594   _regions_claimed               = 0;
3595   _objs_found_on_bitmap          = 0;
3596   _satb_buffers_processed        = 0;
3597 #endif // _MARKING_STATS_
3598 }
3599 
3600 bool CMTask::should_exit_termination() {
3601   regular_clock_call();
3602   // This is called when we are in the termination protocol. We should
3603   // quit if, for some reason, this task wants to abort or the global
3604   // stack is not empty (this means that we can get work from it).
3605   return !_cm->mark_stack_empty() || has_aborted();
3606 }
3607 
3608 void CMTask::reached_limit() {
3609   assert(_words_scanned >= _words_scanned_limit ||
3610          _refs_reached >= _refs_reached_limit ,
3611          "shouldn't have been called otherwise");
3612   regular_clock_call();
3613 }
3614 
3615 void CMTask::regular_clock_call() {
3616   if (has_aborted()) return;
3617 
3618   // First, we need to recalculate the words scanned and refs reached
3619   // limits for the next clock call.
3620   recalculate_limits();
3621 
3622   // During the regular clock call we do the following
3623 
3624   // (1) If an overflow has been flagged, then we abort.
3625   if (_cm->has_overflown()) {
3626     set_has_aborted();
3627     return;
3628   }
3629 
3630   // If we are not concurrent (i.e. we're doing remark) we don't need
3631   // to check anything else. The other steps are only needed during
3632   // the concurrent marking phase.
3633   if (!concurrent()) return;
3634 
3635   // (2) If marking has been aborted for Full GC, then we also abort.
3636   if (_cm->has_aborted()) {
3637     set_has_aborted();
3638     statsOnly( ++_aborted_cm_aborted );
3639     return;
3640   }
3641 
3642   double curr_time_ms = os::elapsedVTime() * 1000.0;
3643 
3644   // (3) If marking stats are enabled, then we update the step history.
3645 #if _MARKING_STATS_
3646   if (_words_scanned >= _words_scanned_limit) {
3647     ++_clock_due_to_scanning;
3648   }
3649   if (_refs_reached >= _refs_reached_limit) {
3650     ++_clock_due_to_marking;
3651   }
3652 
3653   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3654   _interval_start_time_ms = curr_time_ms;
3655   _all_clock_intervals_ms.add(last_interval_ms);
3656 
3657   if (_cm->verbose_medium()) {
3658       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3659                         "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3660                         _worker_id, last_interval_ms,
3661                         _words_scanned,
3662                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3663                         _refs_reached,
3664                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3665   }
3666 #endif // _MARKING_STATS_
3667 
3668   // (4) We check whether we should yield. If we have to, then we abort.
3669   if (SuspendibleThreadSet::should_yield()) {
3670     // We should yield. To do this we abort the task. The caller is
3671     // responsible for yielding.
3672     set_has_aborted();
3673     statsOnly( ++_aborted_yield );
3674     return;
3675   }
3676 
3677   // (5) We check whether we've reached our time quota. If we have,
3678   // then we abort.
3679   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3680   if (elapsed_time_ms > _time_target_ms) {
3681     set_has_aborted();
3682     _has_timed_out = true;
3683     statsOnly( ++_aborted_timed_out );
3684     return;
3685   }
3686 
3687   // (6) Finally, we check whether there are enough completed STAB
3688   // buffers available for processing. If there are, we abort.
3689   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3690   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3691     if (_cm->verbose_low()) {
3692       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3693                              _worker_id);
3694     }
3695     // we do need to process SATB buffers, we'll abort and restart
3696     // the marking task to do so
3697     set_has_aborted();
3698     statsOnly( ++_aborted_satb );
3699     return;
3700   }
3701 }
3702 
3703 void CMTask::recalculate_limits() {
3704   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3705   _words_scanned_limit      = _real_words_scanned_limit;
3706 
3707   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
3708   _refs_reached_limit       = _real_refs_reached_limit;
3709 }
3710 
3711 void CMTask::decrease_limits() {
3712   // This is called when we believe that we're going to do an infrequent
3713   // operation which will increase the per byte scanned cost (i.e. move
3714   // entries to/from the global stack). It basically tries to decrease the
3715   // scanning limit so that the clock is called earlier.
3716 
3717   if (_cm->verbose_medium()) {
3718     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3719   }
3720 
3721   _words_scanned_limit = _real_words_scanned_limit -
3722     3 * words_scanned_period / 4;
3723   _refs_reached_limit  = _real_refs_reached_limit -
3724     3 * refs_reached_period / 4;
3725 }
3726 
3727 void CMTask::move_entries_to_global_stack() {
3728   // local array where we'll store the entries that will be popped
3729   // from the local queue
3730   oop buffer[global_stack_transfer_size];
3731 
3732   int n = 0;
3733   oop obj;
3734   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3735     buffer[n] = obj;
3736     ++n;
3737   }
3738 
3739   if (n > 0) {
3740     // we popped at least one entry from the local queue
3741 
3742     statsOnly( ++_global_transfers_to; _local_pops += n );
3743 
3744     if (!_cm->mark_stack_push(buffer, n)) {
3745       if (_cm->verbose_low()) {
3746         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3747                                _worker_id);
3748       }
3749       set_has_aborted();
3750     } else {
3751       // the transfer was successful
3752 
3753       if (_cm->verbose_medium()) {
3754         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3755                                _worker_id, n);
3756       }
3757       statsOnly( size_t tmp_size = _cm->mark_stack_size();
3758                  if (tmp_size > _global_max_size) {
3759                    _global_max_size = tmp_size;
3760                  }
3761                  _global_pushes += n );
3762     }
3763   }
3764 
3765   // this operation was quite expensive, so decrease the limits
3766   decrease_limits();
3767 }
3768 
3769 void CMTask::get_entries_from_global_stack() {
3770   // local array where we'll store the entries that will be popped
3771   // from the global stack.
3772   oop buffer[global_stack_transfer_size];
3773   int n;
3774   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3775   assert(n <= global_stack_transfer_size,
3776          "we should not pop more than the given limit");
3777   if (n > 0) {
3778     // yes, we did actually pop at least one entry
3779 
3780     statsOnly( ++_global_transfers_from; _global_pops += n );
3781     if (_cm->verbose_medium()) {
3782       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3783                              _worker_id, n);
3784     }
3785     for (int i = 0; i < n; ++i) {
3786       bool success = _task_queue->push(buffer[i]);
3787       // We only call this when the local queue is empty or under a
3788       // given target limit. So, we do not expect this push to fail.
3789       assert(success, "invariant");
3790     }
3791 
3792     statsOnly( size_t tmp_size = (size_t)_task_queue->size();
3793                if (tmp_size > _local_max_size) {
3794                  _local_max_size = tmp_size;
3795                }
3796                _local_pushes += n );
3797   }
3798 
3799   // this operation was quite expensive, so decrease the limits
3800   decrease_limits();
3801 }
3802 
3803 void CMTask::drain_local_queue(bool partially) {
3804   if (has_aborted()) return;
3805 
3806   // Decide what the target size is, depending whether we're going to
3807   // drain it partially (so that other tasks can steal if they run out
3808   // of things to do) or totally (at the very end).
3809   size_t target_size;
3810   if (partially) {
3811     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3812   } else {
3813     target_size = 0;
3814   }
3815 
3816   if (_task_queue->size() > target_size) {
3817     if (_cm->verbose_high()) {
3818       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3819                              _worker_id, target_size);
3820     }
3821 
3822     oop obj;
3823     bool ret = _task_queue->pop_local(obj);
3824     while (ret) {
3825       statsOnly( ++_local_pops );
3826 
3827       if (_cm->verbose_high()) {
3828         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3829                                p2i((void*) obj));
3830       }
3831 
3832       process_queue_entry(obj);
3833 
3834       if (_task_queue->size() <= target_size || has_aborted()) {
3835         ret = false;
3836       } else {
3837         ret = _task_queue->pop_local(obj);
3838       }
3839     }
3840 
3841     if (_cm->verbose_high()) {
3842       gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3843                              _worker_id, _task_queue->size());
3844     }
3845   }
3846 }
3847 
3848 void CMTask::drain_global_stack(bool partially) {
3849   if (has_aborted()) return;
3850 
3851   // We have a policy to drain the local queue before we attempt to
3852   // drain the global stack.
3853   assert(partially || _task_queue->size() == 0, "invariant");
3854 
3855   // Decide what the target size is, depending whether we're going to
3856   // drain it partially (so that other tasks can steal if they run out
3857   // of things to do) or totally (at the very end).  Notice that,
3858   // because we move entries from the global stack in chunks or
3859   // because another task might be doing the same, we might in fact
3860   // drop below the target. But, this is not a problem.
3861   size_t target_size;
3862   if (partially) {
3863     target_size = _cm->partial_mark_stack_size_target();
3864   } else {
3865     target_size = 0;
3866   }
3867 
3868   if (_cm->mark_stack_size() > target_size) {
3869     if (_cm->verbose_low()) {
3870       gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3871                              _worker_id, target_size);
3872     }
3873 
3874     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3875       get_entries_from_global_stack();
3876       drain_local_queue(partially);
3877     }
3878 
3879     if (_cm->verbose_low()) {
3880       gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3881                              _worker_id, _cm->mark_stack_size());
3882     }
3883   }
3884 }
3885 
3886 // SATB Queue has several assumptions on whether to call the par or
3887 // non-par versions of the methods. this is why some of the code is
3888 // replicated. We should really get rid of the single-threaded version
3889 // of the code to simplify things.
3890 void CMTask::drain_satb_buffers() {
3891   if (has_aborted()) return;
3892 
3893   // We set this so that the regular clock knows that we're in the
3894   // middle of draining buffers and doesn't set the abort flag when it
3895   // notices that SATB buffers are available for draining. It'd be
3896   // very counter productive if it did that. :-)
3897   _draining_satb_buffers = true;
3898 
3899   CMObjectClosure oc(this);
3900   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3901   satb_mq_set.set_closure(_worker_id, &oc);
3902 
3903   // This keeps claiming and applying the closure to completed buffers
3904   // until we run out of buffers or we need to abort.
3905   while (!has_aborted() &&
3906          satb_mq_set.apply_closure_to_completed_buffer(_worker_id)) {
3907     if (_cm->verbose_medium()) {
3908       gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3909     }
3910     statsOnly( ++_satb_buffers_processed );
3911     regular_clock_call();
3912   }
3913 
3914   _draining_satb_buffers = false;
3915 
3916   assert(has_aborted() ||
3917          concurrent() ||
3918          satb_mq_set.completed_buffers_num() == 0, "invariant");
3919 
3920   satb_mq_set.set_closure(_worker_id, NULL);
3921 
3922   // again, this was a potentially expensive operation, decrease the
3923   // limits to get the regular clock call early
3924   decrease_limits();
3925 }
3926 
3927 void CMTask::print_stats() {
3928   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3929                          _worker_id, _calls);
3930   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3931                          _elapsed_time_ms, _termination_time_ms);
3932   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3933                          _step_times_ms.num(), _step_times_ms.avg(),
3934                          _step_times_ms.sd());
3935   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3936                          _step_times_ms.maximum(), _step_times_ms.sum());
3937 
3938 #if _MARKING_STATS_
3939   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3940                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3941                          _all_clock_intervals_ms.sd());
3942   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
3943                          _all_clock_intervals_ms.maximum(),
3944                          _all_clock_intervals_ms.sum());
3945   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT,
3946                          _clock_due_to_scanning, _clock_due_to_marking);
3947   gclog_or_tty->print_cr("  Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT,
3948                          _objs_scanned, _objs_found_on_bitmap);
3949   gclog_or_tty->print_cr("  Local Queue:  pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
3950                          _local_pushes, _local_pops, _local_max_size);
3951   gclog_or_tty->print_cr("  Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
3952                          _global_pushes, _global_pops, _global_max_size);
3953   gclog_or_tty->print_cr("                transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT,
3954                          _global_transfers_to,_global_transfers_from);
3955   gclog_or_tty->print_cr("  Regions: claimed = " SIZE_FORMAT, _regions_claimed);
3956   gclog_or_tty->print_cr("  SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed);
3957   gclog_or_tty->print_cr("  Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT,
3958                          _steal_attempts, _steals);
3959   gclog_or_tty->print_cr("  Skipped stale humongous queue entries = " SIZE_FORMAT,
3960                          _stale_humongous_queue_entries);
3961   gclog_or_tty->print_cr("  Aborted: " SIZE_FORMAT ", due to", _aborted);
3962   gclog_or_tty->print_cr("    overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT,
3963                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3964   gclog_or_tty->print_cr("    time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT,
3965                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3966 #endif // _MARKING_STATS_
3967 }
3968 
3969 /*****************************************************************************
3970 
3971     The do_marking_step(time_target_ms, ...) method is the building
3972     block of the parallel marking framework. It can be called in parallel
3973     with other invocations of do_marking_step() on different tasks
3974     (but only one per task, obviously) and concurrently with the
3975     mutator threads, or during remark, hence it eliminates the need
3976     for two versions of the code. When called during remark, it will
3977     pick up from where the task left off during the concurrent marking
3978     phase. Interestingly, tasks are also claimable during evacuation
3979     pauses too, since do_marking_step() ensures that it aborts before
3980     it needs to yield.
3981 
3982     The data structures that it uses to do marking work are the
3983     following:
3984 
3985       (1) Marking Bitmap. If there are gray objects that appear only
3986       on the bitmap (this happens either when dealing with an overflow
3987       or when the initial marking phase has simply marked the roots
3988       and didn't push them on the stack), then tasks claim heap
3989       regions whose bitmap they then scan to find gray objects. A
3990       global finger indicates where the end of the last claimed region
3991       is. A local finger indicates how far into the region a task has
3992       scanned. The two fingers are used to determine how to gray an
3993       object (i.e. whether simply marking it is OK, as it will be
3994       visited by a task in the future, or whether it needs to be also
3995       pushed on a stack).
3996 
3997       (2) Local Queue. The local queue of the task which is accessed
3998       reasonably efficiently by the task. Other tasks can steal from
3999       it when they run out of work. Throughout the marking phase, a
4000       task attempts to keep its local queue short but not totally
4001       empty, so that entries are available for stealing by other
4002       tasks. Only when there is no more work, a task will totally
4003       drain its local queue.
4004 
4005       (3) Global Mark Stack. This handles local queue overflow. During
4006       marking only sets of entries are moved between it and the local
4007       queues, as access to it requires a mutex and more fine-grain
4008       interaction with it which might cause contention. If it
4009       overflows, then the marking phase should restart and iterate
4010       over the bitmap to identify gray objects. Throughout the marking
4011       phase, tasks attempt to keep the global mark stack at a small
4012       length but not totally empty, so that entries are available for
4013       popping by other tasks. Only when there is no more work, tasks
4014       will totally drain the global mark stack.
4015 
4016       (4) SATB Buffer Queue. This is where completed SATB buffers are
4017       made available. Buffers are regularly removed from this queue
4018       and scanned for roots, so that the queue doesn't get too
4019       long. During remark, all completed buffers are processed, as
4020       well as the filled in parts of any uncompleted buffers.
4021 
4022     The do_marking_step() method tries to abort when the time target
4023     has been reached. There are a few other cases when the
4024     do_marking_step() method also aborts:
4025 
4026       (1) When the marking phase has been aborted (after a Full GC).
4027 
4028       (2) When a global overflow (on the global stack) has been
4029       triggered. Before the task aborts, it will actually sync up with
4030       the other tasks to ensure that all the marking data structures
4031       (local queues, stacks, fingers etc.)  are re-initialized so that
4032       when do_marking_step() completes, the marking phase can
4033       immediately restart.
4034 
4035       (3) When enough completed SATB buffers are available. The
4036       do_marking_step() method only tries to drain SATB buffers right
4037       at the beginning. So, if enough buffers are available, the
4038       marking step aborts and the SATB buffers are processed at
4039       the beginning of the next invocation.
4040 
4041       (4) To yield. when we have to yield then we abort and yield
4042       right at the end of do_marking_step(). This saves us from a lot
4043       of hassle as, by yielding we might allow a Full GC. If this
4044       happens then objects will be compacted underneath our feet, the
4045       heap might shrink, etc. We save checking for this by just
4046       aborting and doing the yield right at the end.
4047 
4048     From the above it follows that the do_marking_step() method should
4049     be called in a loop (or, otherwise, regularly) until it completes.
4050 
4051     If a marking step completes without its has_aborted() flag being
4052     true, it means it has completed the current marking phase (and
4053     also all other marking tasks have done so and have all synced up).
4054 
4055     A method called regular_clock_call() is invoked "regularly" (in
4056     sub ms intervals) throughout marking. It is this clock method that
4057     checks all the abort conditions which were mentioned above and
4058     decides when the task should abort. A work-based scheme is used to
4059     trigger this clock method: when the number of object words the
4060     marking phase has scanned or the number of references the marking
4061     phase has visited reach a given limit. Additional invocations to
4062     the method clock have been planted in a few other strategic places
4063     too. The initial reason for the clock method was to avoid calling
4064     vtime too regularly, as it is quite expensive. So, once it was in
4065     place, it was natural to piggy-back all the other conditions on it
4066     too and not constantly check them throughout the code.
4067 
4068     If do_termination is true then do_marking_step will enter its
4069     termination protocol.
4070 
4071     The value of is_serial must be true when do_marking_step is being
4072     called serially (i.e. by the VMThread) and do_marking_step should
4073     skip any synchronization in the termination and overflow code.
4074     Examples include the serial remark code and the serial reference
4075     processing closures.
4076 
4077     The value of is_serial must be false when do_marking_step is
4078     being called by any of the worker threads in a work gang.
4079     Examples include the concurrent marking code (CMMarkingTask),
4080     the MT remark code, and the MT reference processing closures.
4081 
4082  *****************************************************************************/
4083 
4084 void CMTask::do_marking_step(double time_target_ms,
4085                              bool do_termination,
4086                              bool is_serial) {
4087   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4088   assert(concurrent() == _cm->concurrent(), "they should be the same");
4089 
4090   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4091   assert(_task_queues != NULL, "invariant");
4092   assert(_task_queue != NULL, "invariant");
4093   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4094 
4095   assert(!_claimed,
4096          "only one thread should claim this task at any one time");
4097 
4098   // OK, this doesn't safeguard again all possible scenarios, as it is
4099   // possible for two threads to set the _claimed flag at the same
4100   // time. But it is only for debugging purposes anyway and it will
4101   // catch most problems.
4102   _claimed = true;
4103 
4104   _start_time_ms = os::elapsedVTime() * 1000.0;
4105   statsOnly( _interval_start_time_ms = _start_time_ms );
4106 
4107   // If do_stealing is true then do_marking_step will attempt to
4108   // steal work from the other CMTasks. It only makes sense to
4109   // enable stealing when the termination protocol is enabled
4110   // and do_marking_step() is not being called serially.
4111   bool do_stealing = do_termination && !is_serial;
4112 
4113   double diff_prediction_ms =
4114     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4115   _time_target_ms = time_target_ms - diff_prediction_ms;
4116 
4117   // set up the variables that are used in the work-based scheme to
4118   // call the regular clock method
4119   _words_scanned = 0;
4120   _refs_reached  = 0;
4121   recalculate_limits();
4122 
4123   // clear all flags
4124   clear_has_aborted();
4125   _has_timed_out = false;
4126   _draining_satb_buffers = false;
4127 
4128   ++_calls;
4129 
4130   if (_cm->verbose_low()) {
4131     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4132                            "target = %1.2lfms >>>>>>>>>>",
4133                            _worker_id, _calls, _time_target_ms);
4134   }
4135 
4136   // Set up the bitmap and oop closures. Anything that uses them is
4137   // eventually called from this method, so it is OK to allocate these
4138   // statically.
4139   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4140   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
4141   set_cm_oop_closure(&cm_oop_closure);
4142 
4143   if (_cm->has_overflown()) {
4144     // This can happen if the mark stack overflows during a GC pause
4145     // and this task, after a yield point, restarts. We have to abort
4146     // as we need to get into the overflow protocol which happens
4147     // right at the end of this task.
4148     set_has_aborted();
4149   }
4150 
4151   // First drain any available SATB buffers. After this, we will not
4152   // look at SATB buffers before the next invocation of this method.
4153   // If enough completed SATB buffers are queued up, the regular clock
4154   // will abort this task so that it restarts.
4155   drain_satb_buffers();
4156   // ...then partially drain the local queue and the global stack
4157   drain_local_queue(true);
4158   drain_global_stack(true);
4159 
4160   do {
4161     if (!has_aborted() && _curr_region != NULL) {
4162       // This means that we're already holding on to a region.
4163       assert(_finger != NULL, "if region is not NULL, then the finger "
4164              "should not be NULL either");
4165 
4166       // We might have restarted this task after an evacuation pause
4167       // which might have evacuated the region we're holding on to
4168       // underneath our feet. Let's read its limit again to make sure
4169       // that we do not iterate over a region of the heap that
4170       // contains garbage (update_region_limit() will also move
4171       // _finger to the start of the region if it is found empty).
4172       update_region_limit();
4173       // We will start from _finger not from the start of the region,
4174       // as we might be restarting this task after aborting half-way
4175       // through scanning this region. In this case, _finger points to
4176       // the address where we last found a marked object. If this is a
4177       // fresh region, _finger points to start().
4178       MemRegion mr = MemRegion(_finger, _region_limit);
4179 
4180       if (_cm->verbose_low()) {
4181         gclog_or_tty->print_cr("[%u] we're scanning part "
4182                                "["PTR_FORMAT", "PTR_FORMAT") "
4183                                "of region "HR_FORMAT,
4184                                _worker_id, p2i(_finger), p2i(_region_limit),
4185                                HR_FORMAT_PARAMS(_curr_region));
4186       }
4187 
4188       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
4189              "humongous regions should go around loop once only");
4190 
4191       // Some special cases:
4192       // If the memory region is empty, we can just give up the region.
4193       // If the current region is humongous then we only need to check
4194       // the bitmap for the bit associated with the start of the object,
4195       // scan the object if it's live, and give up the region.
4196       // Otherwise, let's iterate over the bitmap of the part of the region
4197       // that is left.
4198       // If the iteration is successful, give up the region.
4199       if (mr.is_empty()) {
4200         giveup_current_region();
4201         regular_clock_call();
4202       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
4203         if (_nextMarkBitMap->isMarked(mr.start())) {
4204           // The object is marked - apply the closure
4205           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4206           bitmap_closure.do_bit(offset);
4207         }
4208         // Even if this task aborted while scanning the humongous object
4209         // we can (and should) give up the current region.
4210         giveup_current_region();
4211         regular_clock_call();
4212       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4213         giveup_current_region();
4214         regular_clock_call();
4215       } else {
4216         assert(has_aborted(), "currently the only way to do so");
4217         // The only way to abort the bitmap iteration is to return
4218         // false from the do_bit() method. However, inside the
4219         // do_bit() method we move the _finger to point to the
4220         // object currently being looked at. So, if we bail out, we
4221         // have definitely set _finger to something non-null.
4222         assert(_finger != NULL, "invariant");
4223 
4224         // Region iteration was actually aborted. So now _finger
4225         // points to the address of the object we last scanned. If we
4226         // leave it there, when we restart this task, we will rescan
4227         // the object. It is easy to avoid this. We move the finger by
4228         // enough to point to the next possible object header (the
4229         // bitmap knows by how much we need to move it as it knows its
4230         // granularity).
4231         assert(_finger < _region_limit, "invariant");
4232         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
4233         // Check if bitmap iteration was aborted while scanning the last object
4234         if (new_finger >= _region_limit) {
4235           giveup_current_region();
4236         } else {
4237           move_finger_to(new_finger);
4238         }
4239       }
4240     }
4241     // At this point we have either completed iterating over the
4242     // region we were holding on to, or we have aborted.
4243 
4244     // We then partially drain the local queue and the global stack.
4245     // (Do we really need this?)
4246     drain_local_queue(true);
4247     drain_global_stack(true);
4248 
4249     // Read the note on the claim_region() method on why it might
4250     // return NULL with potentially more regions available for
4251     // claiming and why we have to check out_of_regions() to determine
4252     // whether we're done or not.
4253     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4254       // We are going to try to claim a new region. We should have
4255       // given up on the previous one.
4256       // Separated the asserts so that we know which one fires.
4257       assert(_curr_region  == NULL, "invariant");
4258       assert(_finger       == NULL, "invariant");
4259       assert(_region_limit == NULL, "invariant");
4260       if (_cm->verbose_low()) {
4261         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4262       }
4263       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4264       if (claimed_region != NULL) {
4265         // Yes, we managed to claim one
4266         statsOnly( ++_regions_claimed );
4267 
4268         if (_cm->verbose_low()) {
4269           gclog_or_tty->print_cr("[%u] we successfully claimed "
4270                                  "region "PTR_FORMAT,
4271                                  _worker_id, p2i(claimed_region));
4272         }
4273 
4274         setup_for_region(claimed_region);
4275         assert(_curr_region == claimed_region, "invariant");
4276       }
4277       // It is important to call the regular clock here. It might take
4278       // a while to claim a region if, for example, we hit a large
4279       // block of empty regions. So we need to call the regular clock
4280       // method once round the loop to make sure it's called
4281       // frequently enough.
4282       regular_clock_call();
4283     }
4284 
4285     if (!has_aborted() && _curr_region == NULL) {
4286       assert(_cm->out_of_regions(),
4287              "at this point we should be out of regions");
4288     }
4289   } while ( _curr_region != NULL && !has_aborted());
4290 
4291   if (!has_aborted()) {
4292     // We cannot check whether the global stack is empty, since other
4293     // tasks might be pushing objects to it concurrently.
4294     assert(_cm->out_of_regions(),
4295            "at this point we should be out of regions");
4296 
4297     if (_cm->verbose_low()) {
4298       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4299     }
4300 
4301     // Try to reduce the number of available SATB buffers so that
4302     // remark has less work to do.
4303     drain_satb_buffers();
4304   }
4305 
4306   // Since we've done everything else, we can now totally drain the
4307   // local queue and global stack.
4308   drain_local_queue(false);
4309   drain_global_stack(false);
4310 
4311   // Attempt at work stealing from other task's queues.
4312   if (do_stealing && !has_aborted()) {
4313     // We have not aborted. This means that we have finished all that
4314     // we could. Let's try to do some stealing...
4315 
4316     // We cannot check whether the global stack is empty, since other
4317     // tasks might be pushing objects to it concurrently.
4318     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4319            "only way to reach here");
4320 
4321     if (_cm->verbose_low()) {
4322       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4323     }
4324 
4325     while (!has_aborted()) {
4326       oop obj;
4327       statsOnly( ++_steal_attempts );
4328 
4329       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4330         if (_cm->verbose_medium()) {
4331           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4332                                  _worker_id, p2i((void*) obj));
4333         }
4334 
4335         statsOnly( ++_steals );
4336         process_queue_entry(obj);
4337 
4338         // And since we're towards the end, let's totally drain the
4339         // local queue and global stack.
4340         drain_local_queue(false);
4341         drain_global_stack(false);
4342       } else {
4343         break;
4344       }
4345     }
4346   }
4347 
4348   // If we are about to wrap up and go into termination, check if we
4349   // should raise the overflow flag.
4350   if (do_termination && !has_aborted()) {
4351     if (_cm->force_overflow()->should_force()) {
4352       _cm->set_has_overflown();
4353       regular_clock_call();
4354     }
4355   }
4356 
4357   // We still haven't aborted. Now, let's try to get into the
4358   // termination protocol.
4359   if (do_termination && !has_aborted()) {
4360     // We cannot check whether the global stack is empty, since other
4361     // tasks might be concurrently pushing objects on it.
4362     // Separated the asserts so that we know which one fires.
4363     assert(_cm->out_of_regions(), "only way to reach here");
4364     assert(_task_queue->size() == 0, "only way to reach here");
4365 
4366     if (_cm->verbose_low()) {
4367       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4368     }
4369 
4370     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4371 
4372     // The CMTask class also extends the TerminatorTerminator class,
4373     // hence its should_exit_termination() method will also decide
4374     // whether to exit the termination protocol or not.
4375     bool finished = (is_serial ||
4376                      _cm->terminator()->offer_termination(this));
4377     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4378     _termination_time_ms +=
4379       termination_end_time_ms - _termination_start_time_ms;
4380 
4381     if (finished) {
4382       // We're all done.
4383 
4384       if (_worker_id == 0) {
4385         // let's allow task 0 to do this
4386         if (concurrent()) {
4387           assert(_cm->concurrent_marking_in_progress(), "invariant");
4388           // we need to set this to false before the next
4389           // safepoint. This way we ensure that the marking phase
4390           // doesn't observe any more heap expansions.
4391           _cm->clear_concurrent_marking_in_progress();
4392         }
4393       }
4394 
4395       // We can now guarantee that the global stack is empty, since
4396       // all other tasks have finished. We separated the guarantees so
4397       // that, if a condition is false, we can immediately find out
4398       // which one.
4399       guarantee(_cm->out_of_regions(), "only way to reach here");
4400       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4401       guarantee(_task_queue->size() == 0, "only way to reach here");
4402       guarantee(!_cm->has_overflown(), "only way to reach here");
4403       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4404 
4405       if (_cm->verbose_low()) {
4406         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4407       }
4408     } else {
4409       // Apparently there's more work to do. Let's abort this task. It
4410       // will restart it and we can hopefully find more things to do.
4411 
4412       if (_cm->verbose_low()) {
4413         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4414                                _worker_id);
4415       }
4416 
4417       set_has_aborted();
4418       statsOnly( ++_aborted_termination );
4419     }
4420   }
4421 
4422   // Mainly for debugging purposes to make sure that a pointer to the
4423   // closure which was statically allocated in this frame doesn't
4424   // escape it by accident.
4425   set_cm_oop_closure(NULL);
4426   double end_time_ms = os::elapsedVTime() * 1000.0;
4427   double elapsed_time_ms = end_time_ms - _start_time_ms;
4428   // Update the step history.
4429   _step_times_ms.add(elapsed_time_ms);
4430 
4431   if (has_aborted()) {
4432     // The task was aborted for some reason.
4433 
4434     statsOnly( ++_aborted );
4435 
4436     if (_has_timed_out) {
4437       double diff_ms = elapsed_time_ms - _time_target_ms;
4438       // Keep statistics of how well we did with respect to hitting
4439       // our target only if we actually timed out (if we aborted for
4440       // other reasons, then the results might get skewed).
4441       _marking_step_diffs_ms.add(diff_ms);
4442     }
4443 
4444     if (_cm->has_overflown()) {
4445       // This is the interesting one. We aborted because a global
4446       // overflow was raised. This means we have to restart the
4447       // marking phase and start iterating over regions. However, in
4448       // order to do this we have to make sure that all tasks stop
4449       // what they are doing and re-initialize in a safe manner. We
4450       // will achieve this with the use of two barrier sync points.
4451 
4452       if (_cm->verbose_low()) {
4453         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4454       }
4455 
4456       if (!is_serial) {
4457         // We only need to enter the sync barrier if being called
4458         // from a parallel context
4459         _cm->enter_first_sync_barrier(_worker_id);
4460 
4461         // When we exit this sync barrier we know that all tasks have
4462         // stopped doing marking work. So, it's now safe to
4463         // re-initialize our data structures. At the end of this method,
4464         // task 0 will clear the global data structures.
4465       }
4466 
4467       statsOnly( ++_aborted_overflow );
4468 
4469       // We clear the local state of this task...
4470       clear_region_fields();
4471 
4472       if (!is_serial) {
4473         // ...and enter the second barrier.
4474         _cm->enter_second_sync_barrier(_worker_id);
4475       }
4476       // At this point, if we're during the concurrent phase of
4477       // marking, everything has been re-initialized and we're
4478       // ready to restart.
4479     }
4480 
4481     if (_cm->verbose_low()) {
4482       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4483                              "elapsed = %1.2lfms <<<<<<<<<<",
4484                              _worker_id, _time_target_ms, elapsed_time_ms);
4485       if (_cm->has_aborted()) {
4486         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4487                                _worker_id);
4488       }
4489     }
4490   } else {
4491     if (_cm->verbose_low()) {
4492       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4493                              "elapsed = %1.2lfms <<<<<<<<<<",
4494                              _worker_id, _time_target_ms, elapsed_time_ms);
4495     }
4496   }
4497 
4498   _claimed = false;
4499 }
4500 
4501 CMTask::CMTask(uint worker_id,
4502                ConcurrentMark* cm,
4503                size_t* marked_bytes,
4504                BitMap* card_bm,
4505                CMTaskQueue* task_queue,
4506                CMTaskQueueSet* task_queues)
4507   : _g1h(G1CollectedHeap::heap()),
4508     _worker_id(worker_id), _cm(cm),
4509     _claimed(false),
4510     _nextMarkBitMap(NULL), _hash_seed(17),
4511     _task_queue(task_queue),
4512     _task_queues(task_queues),
4513     _cm_oop_closure(NULL),
4514     _marked_bytes_array(marked_bytes),
4515     _card_bm(card_bm) {
4516   guarantee(task_queue != NULL, "invariant");
4517   guarantee(task_queues != NULL, "invariant");
4518 
4519   statsOnly( _clock_due_to_scanning = 0;
4520              _clock_due_to_marking  = 0 );
4521 
4522   _marking_step_diffs_ms.add(0.5);
4523 }
4524 
4525 // These are formatting macros that are used below to ensure
4526 // consistent formatting. The *_H_* versions are used to format the
4527 // header for a particular value and they should be kept consistent
4528 // with the corresponding macro. Also note that most of the macros add
4529 // the necessary white space (as a prefix) which makes them a bit
4530 // easier to compose.
4531 
4532 // All the output lines are prefixed with this string to be able to
4533 // identify them easily in a large log file.
4534 #define G1PPRL_LINE_PREFIX            "###"
4535 
4536 #define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
4537 #ifdef _LP64
4538 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
4539 #else // _LP64
4540 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
4541 #endif // _LP64
4542 
4543 // For per-region info
4544 #define G1PPRL_TYPE_FORMAT            "   %-4s"
4545 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
4546 #define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
4547 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
4548 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
4549 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
4550 
4551 // For summary info
4552 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
4553 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
4554 #define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
4555 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
4556 
4557 G1PrintRegionLivenessInfoClosure::
4558 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4559   : _out(out),
4560     _total_used_bytes(0), _total_capacity_bytes(0),
4561     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4562     _hum_used_bytes(0), _hum_capacity_bytes(0),
4563     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4564     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4565   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4566   MemRegion g1_reserved = g1h->g1_reserved();
4567   double now = os::elapsedTime();
4568 
4569   // Print the header of the output.
4570   _out->cr();
4571   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4572   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4573                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4574                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4575                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4576                  HeapRegion::GrainBytes);
4577   _out->print_cr(G1PPRL_LINE_PREFIX);
4578   _out->print_cr(G1PPRL_LINE_PREFIX
4579                 G1PPRL_TYPE_H_FORMAT
4580                 G1PPRL_ADDR_BASE_H_FORMAT
4581                 G1PPRL_BYTE_H_FORMAT
4582                 G1PPRL_BYTE_H_FORMAT
4583                 G1PPRL_BYTE_H_FORMAT
4584                 G1PPRL_DOUBLE_H_FORMAT
4585                 G1PPRL_BYTE_H_FORMAT
4586                 G1PPRL_BYTE_H_FORMAT,
4587                 "type", "address-range",
4588                 "used", "prev-live", "next-live", "gc-eff",
4589                 "remset", "code-roots");
4590   _out->print_cr(G1PPRL_LINE_PREFIX
4591                 G1PPRL_TYPE_H_FORMAT
4592                 G1PPRL_ADDR_BASE_H_FORMAT
4593                 G1PPRL_BYTE_H_FORMAT
4594                 G1PPRL_BYTE_H_FORMAT
4595                 G1PPRL_BYTE_H_FORMAT
4596                 G1PPRL_DOUBLE_H_FORMAT
4597                 G1PPRL_BYTE_H_FORMAT
4598                 G1PPRL_BYTE_H_FORMAT,
4599                 "", "",
4600                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4601                 "(bytes)", "(bytes)");
4602 }
4603 
4604 // It takes as a parameter a reference to one of the _hum_* fields, it
4605 // deduces the corresponding value for a region in a humongous region
4606 // series (either the region size, or what's left if the _hum_* field
4607 // is < the region size), and updates the _hum_* field accordingly.
4608 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4609   size_t bytes = 0;
4610   // The > 0 check is to deal with the prev and next live bytes which
4611   // could be 0.
4612   if (*hum_bytes > 0) {
4613     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4614     *hum_bytes -= bytes;
4615   }
4616   return bytes;
4617 }
4618 
4619 // It deduces the values for a region in a humongous region series
4620 // from the _hum_* fields and updates those accordingly. It assumes
4621 // that that _hum_* fields have already been set up from the "starts
4622 // humongous" region and we visit the regions in address order.
4623 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4624                                                      size_t* capacity_bytes,
4625                                                      size_t* prev_live_bytes,
4626                                                      size_t* next_live_bytes) {
4627   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4628   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
4629   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
4630   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4631   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4632 }
4633 
4634 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4635   const char* type       = r->get_type_str();
4636   HeapWord* bottom       = r->bottom();
4637   HeapWord* end          = r->end();
4638   size_t capacity_bytes  = r->capacity();
4639   size_t used_bytes      = r->used();
4640   size_t prev_live_bytes = r->live_bytes();
4641   size_t next_live_bytes = r->next_live_bytes();
4642   double gc_eff          = r->gc_efficiency();
4643   size_t remset_bytes    = r->rem_set()->mem_size();
4644   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4645 
4646   if (r->is_starts_humongous()) {
4647     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4648            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4649            "they should have been zeroed after the last time we used them");
4650     // Set up the _hum_* fields.
4651     _hum_capacity_bytes  = capacity_bytes;
4652     _hum_used_bytes      = used_bytes;
4653     _hum_prev_live_bytes = prev_live_bytes;
4654     _hum_next_live_bytes = next_live_bytes;
4655     get_hum_bytes(&used_bytes, &capacity_bytes,
4656                   &prev_live_bytes, &next_live_bytes);
4657     end = bottom + HeapRegion::GrainWords;
4658   } else if (r->is_continues_humongous()) {
4659     get_hum_bytes(&used_bytes, &capacity_bytes,
4660                   &prev_live_bytes, &next_live_bytes);
4661     assert(end == bottom + HeapRegion::GrainWords, "invariant");
4662   }
4663 
4664   _total_used_bytes      += used_bytes;
4665   _total_capacity_bytes  += capacity_bytes;
4666   _total_prev_live_bytes += prev_live_bytes;
4667   _total_next_live_bytes += next_live_bytes;
4668   _total_remset_bytes    += remset_bytes;
4669   _total_strong_code_roots_bytes += strong_code_roots_bytes;
4670 
4671   // Print a line for this particular region.
4672   _out->print_cr(G1PPRL_LINE_PREFIX
4673                  G1PPRL_TYPE_FORMAT
4674                  G1PPRL_ADDR_BASE_FORMAT
4675                  G1PPRL_BYTE_FORMAT
4676                  G1PPRL_BYTE_FORMAT
4677                  G1PPRL_BYTE_FORMAT
4678                  G1PPRL_DOUBLE_FORMAT
4679                  G1PPRL_BYTE_FORMAT
4680                  G1PPRL_BYTE_FORMAT,
4681                  type, p2i(bottom), p2i(end),
4682                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4683                  remset_bytes, strong_code_roots_bytes);
4684 
4685   return false;
4686 }
4687 
4688 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4689   // add static memory usages to remembered set sizes
4690   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4691   // Print the footer of the output.
4692   _out->print_cr(G1PPRL_LINE_PREFIX);
4693   _out->print_cr(G1PPRL_LINE_PREFIX
4694                  " SUMMARY"
4695                  G1PPRL_SUM_MB_FORMAT("capacity")
4696                  G1PPRL_SUM_MB_PERC_FORMAT("used")
4697                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4698                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4699                  G1PPRL_SUM_MB_FORMAT("remset")
4700                  G1PPRL_SUM_MB_FORMAT("code-roots"),
4701                  bytes_to_mb(_total_capacity_bytes),
4702                  bytes_to_mb(_total_used_bytes),
4703                  perc(_total_used_bytes, _total_capacity_bytes),
4704                  bytes_to_mb(_total_prev_live_bytes),
4705                  perc(_total_prev_live_bytes, _total_capacity_bytes),
4706                  bytes_to_mb(_total_next_live_bytes),
4707                  perc(_total_next_live_bytes, _total_capacity_bytes),
4708                  bytes_to_mb(_total_remset_bytes),
4709                  bytes_to_mb(_total_strong_code_roots_bytes));
4710   _out->cr();
4711 }