1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/concurrentMark.inline.hpp"
  30 #include "gc/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc/g1/g1CollectedHeap.inline.hpp"
  32 #include "gc/g1/g1CollectorPolicy.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ErgoVerbose.hpp"
  35 #include "gc/g1/g1Log.hpp"
  36 #include "gc/g1/g1OopClosures.inline.hpp"
  37 #include "gc/g1/g1RemSet.hpp"
  38 #include "gc/g1/g1StringDedup.hpp"
  39 #include "gc/g1/heapRegion.inline.hpp"
  40 #include "gc/g1/heapRegionManager.inline.hpp"
  41 #include "gc/g1/heapRegionRemSet.hpp"
  42 #include "gc/g1/heapRegionSet.inline.hpp"
  43 #include "gc/g1/suspendibleThreadSet.hpp"
  44 #include "gc/shared/gcId.hpp"
  45 #include "gc/shared/gcTimer.hpp"
  46 #include "gc/shared/gcTrace.hpp"
  47 #include "gc/shared/gcTraceTime.hpp"
  48 #include "gc/shared/genOopClosures.inline.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/strongRootsScope.hpp"
  51 #include "gc/shared/taskqueue.inline.hpp"
  52 #include "gc/shared/vmGCOperations.hpp"
  53 #include "memory/allocation.hpp"
  54 #include "memory/resourceArea.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "runtime/atomic.inline.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/prefetch.inline.hpp"
  60 #include "services/memTracker.hpp"
  61 
  62 // Concurrent marking bit map wrapper
  63 
  64 CMBitMapRO::CMBitMapRO(int shifter) :
  65   _bm(),
  66   _shifter(shifter) {
  67   _bmStartWord = 0;
  68   _bmWordSize = 0;
  69 }
  70 
  71 HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  72                                                const HeapWord* limit) const {
  73   // First we must round addr *up* to a possible object boundary.
  74   addr = (HeapWord*)align_size_up((intptr_t)addr,
  75                                   HeapWordSize << _shifter);
  76   size_t addrOffset = heapWordToOffset(addr);
  77   if (limit == NULL) {
  78     limit = _bmStartWord + _bmWordSize;
  79   }
  80   size_t limitOffset = heapWordToOffset(limit);
  81   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  82   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  83   assert(nextAddr >= addr, "get_next_one postcondition");
  84   assert(nextAddr == limit || isMarked(nextAddr),
  85          "get_next_one postcondition");
  86   return nextAddr;
  87 }
  88 
  89 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
  90                                                  const HeapWord* limit) const {
  91   size_t addrOffset = heapWordToOffset(addr);
  92   if (limit == NULL) {
  93     limit = _bmStartWord + _bmWordSize;
  94   }
  95   size_t limitOffset = heapWordToOffset(limit);
  96   size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  97   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  98   assert(nextAddr >= addr, "get_next_one postcondition");
  99   assert(nextAddr == limit || !isMarked(nextAddr),
 100          "get_next_one postcondition");
 101   return nextAddr;
 102 }
 103 
 104 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
 105   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
 106   return (int) (diff >> _shifter);
 107 }
 108 
 109 #ifndef PRODUCT
 110 bool CMBitMapRO::covers(MemRegion heap_rs) const {
 111   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
 112   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
 113          "size inconsistency");
 114   return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
 115          _bmWordSize  == heap_rs.word_size();
 116 }
 117 #endif
 118 
 119 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
 120   _bm.print_on_error(st, prefix);
 121 }
 122 
 123 size_t CMBitMap::compute_size(size_t heap_size) {
 124   return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
 125 }
 126 
 127 size_t CMBitMap::mark_distance() {
 128   return MinObjAlignmentInBytes * BitsPerByte;
 129 }
 130 
 131 void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
 132   _bmStartWord = heap.start();
 133   _bmWordSize = heap.word_size();
 134 
 135   _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
 136   _bm.set_size(_bmWordSize >> _shifter);
 137 
 138   storage->set_mapping_changed_listener(&_listener);
 139 }
 140 
 141 void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
 142   if (zero_filled) {
 143     return;
 144   }
 145   // We need to clear the bitmap on commit, removing any existing information.
 146   MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
 147   _bm->clearRange(mr);
 148 }
 149 
 150 // Closure used for clearing the given mark bitmap.
 151 class ClearBitmapHRClosure : public HeapRegionClosure {
 152  private:
 153   ConcurrentMark* _cm;
 154   CMBitMap* _bitmap;
 155   bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 156  public:
 157   ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
 158     assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
 159   }
 160 
 161   virtual bool doHeapRegion(HeapRegion* r) {
 162     size_t const chunk_size_in_words = M / HeapWordSize;
 163 
 164     HeapWord* cur = r->bottom();
 165     HeapWord* const end = r->end();
 166 
 167     while (cur < end) {
 168       MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 169       _bitmap->clearRange(mr);
 170 
 171       cur += chunk_size_in_words;
 172 
 173       // Abort iteration if after yielding the marking has been aborted.
 174       if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
 175         return true;
 176       }
 177       // Repeat the asserts from before the start of the closure. We will do them
 178       // as asserts here to minimize their overhead on the product. However, we
 179       // will have them as guarantees at the beginning / end of the bitmap
 180       // clearing to get some checking in the product.
 181       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
 182       assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
 183     }
 184 
 185     return false;
 186   }
 187 };
 188 
 189 class ParClearNextMarkBitmapTask : public AbstractGangTask {
 190   ClearBitmapHRClosure* _cl;
 191   HeapRegionClaimer     _hrclaimer;
 192   bool                  _suspendible; // If the task is suspendible, workers must join the STS.
 193 
 194 public:
 195   ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
 196       _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}
 197 
 198   void work(uint worker_id) {
 199     SuspendibleThreadSetJoiner sts_join(_suspendible);
 200     G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
 201   }
 202 };
 203 
 204 void CMBitMap::clearAll() {
 205   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 206   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
 207   uint n_workers = g1h->workers()->active_workers();
 208   ParClearNextMarkBitmapTask task(&cl, n_workers, false);
 209   g1h->workers()->run_task(&task);
 210   guarantee(cl.complete(), "Must have completed iteration.");
 211   return;
 212 }
 213 
 214 void CMBitMap::markRange(MemRegion mr) {
 215   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 216   assert(!mr.is_empty(), "unexpected empty region");
 217   assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 218           ((HeapWord *) mr.end())),
 219          "markRange memory region end is not card aligned");
 220   // convert address range into offset range
 221   _bm.at_put_range(heapWordToOffset(mr.start()),
 222                    heapWordToOffset(mr.end()), true);
 223 }
 224 
 225 void CMBitMap::clearRange(MemRegion mr) {
 226   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 227   assert(!mr.is_empty(), "unexpected empty region");
 228   // convert address range into offset range
 229   _bm.at_put_range(heapWordToOffset(mr.start()),
 230                    heapWordToOffset(mr.end()), false);
 231 }
 232 
 233 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
 234                                             HeapWord* end_addr) {
 235   HeapWord* start = getNextMarkedWordAddress(addr);
 236   start = MIN2(start, end_addr);
 237   HeapWord* end   = getNextUnmarkedWordAddress(start);
 238   end = MIN2(end, end_addr);
 239   assert(start <= end, "Consistency check");
 240   MemRegion mr(start, end);
 241   if (!mr.is_empty()) {
 242     clearRange(mr);
 243   }
 244   return mr;
 245 }
 246 
 247 CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
 248   _base(NULL), _cm(cm)
 249 #ifdef ASSERT
 250   , _drain_in_progress(false)
 251   , _drain_in_progress_yields(false)
 252 #endif
 253 {}
 254 
 255 bool CMMarkStack::allocate(size_t capacity) {
 256   // allocate a stack of the requisite depth
 257   ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
 258   if (!rs.is_reserved()) {
 259     warning("ConcurrentMark MarkStack allocation failure");
 260     return false;
 261   }
 262   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 263   if (!_virtual_space.initialize(rs, rs.size())) {
 264     warning("ConcurrentMark MarkStack backing store failure");
 265     // Release the virtual memory reserved for the marking stack
 266     rs.release();
 267     return false;
 268   }
 269   assert(_virtual_space.committed_size() == rs.size(),
 270          "Didn't reserve backing store for all of ConcurrentMark stack?");
 271   _base = (oop*) _virtual_space.low();
 272   setEmpty();
 273   _capacity = (jint) capacity;
 274   _saved_index = -1;
 275   _should_expand = false;
 276   return true;
 277 }
 278 
 279 void CMMarkStack::expand() {
 280   // Called, during remark, if we've overflown the marking stack during marking.
 281   assert(isEmpty(), "stack should been emptied while handling overflow");
 282   assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
 283   // Clear expansion flag
 284   _should_expand = false;
 285   if (_capacity == (jint) MarkStackSizeMax) {
 286     if (PrintGCDetails && Verbose) {
 287       gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
 288     }
 289     return;
 290   }
 291   // Double capacity if possible
 292   jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
 293   // Do not give up existing stack until we have managed to
 294   // get the double capacity that we desired.
 295   ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
 296                                                            sizeof(oop)));
 297   if (rs.is_reserved()) {
 298     // Release the backing store associated with old stack
 299     _virtual_space.release();
 300     // Reinitialize virtual space for new stack
 301     if (!_virtual_space.initialize(rs, rs.size())) {
 302       fatal("Not enough swap for expanded marking stack capacity");
 303     }
 304     _base = (oop*)(_virtual_space.low());
 305     _index = 0;
 306     _capacity = new_capacity;
 307   } else {
 308     if (PrintGCDetails && Verbose) {
 309       // Failed to double capacity, continue;
 310       gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
 311                           SIZE_FORMAT "K to " SIZE_FORMAT "K",
 312                           _capacity / K, new_capacity / K);
 313     }
 314   }
 315 }
 316 
 317 void CMMarkStack::set_should_expand() {
 318   // If we're resetting the marking state because of an
 319   // marking stack overflow, record that we should, if
 320   // possible, expand the stack.
 321   _should_expand = _cm->has_overflown();
 322 }
 323 
 324 CMMarkStack::~CMMarkStack() {
 325   if (_base != NULL) {
 326     _base = NULL;
 327     _virtual_space.release();
 328   }
 329 }
 330 
 331 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 332   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 333   jint start = _index;
 334   jint next_index = start + n;
 335   if (next_index > _capacity) {
 336     _overflow = true;
 337     return;
 338   }
 339   // Otherwise.
 340   _index = next_index;
 341   for (int i = 0; i < n; i++) {
 342     int ind = start + i;
 343     assert(ind < _capacity, "By overflow test above.");
 344     _base[ind] = ptr_arr[i];
 345   }
 346 }
 347 
 348 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
 349   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 350   jint index = _index;
 351   if (index == 0) {
 352     *n = 0;
 353     return false;
 354   } else {
 355     int k = MIN2(max, index);
 356     jint  new_ind = index - k;
 357     for (int j = 0; j < k; j++) {
 358       ptr_arr[j] = _base[new_ind + j];
 359     }
 360     _index = new_ind;
 361     *n = k;
 362     return true;
 363   }
 364 }
 365 
 366 template<class OopClosureClass>
 367 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
 368   assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
 369          || SafepointSynchronize::is_at_safepoint(),
 370          "Drain recursion must be yield-safe.");
 371   bool res = true;
 372   debug_only(_drain_in_progress = true);
 373   debug_only(_drain_in_progress_yields = yield_after);
 374   while (!isEmpty()) {
 375     oop newOop = pop();
 376     assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
 377     assert(newOop->is_oop(), "Expected an oop");
 378     assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
 379            "only grey objects on this stack");
 380     newOop->oop_iterate(cl);
 381     if (yield_after && _cm->do_yield_check()) {
 382       res = false;
 383       break;
 384     }
 385   }
 386   debug_only(_drain_in_progress = false);
 387   return res;
 388 }
 389 
 390 void CMMarkStack::note_start_of_gc() {
 391   assert(_saved_index == -1,
 392          "note_start_of_gc()/end_of_gc() bracketed incorrectly");
 393   _saved_index = _index;
 394 }
 395 
 396 void CMMarkStack::note_end_of_gc() {
 397   // This is intentionally a guarantee, instead of an assert. If we
 398   // accidentally add something to the mark stack during GC, it
 399   // will be a correctness issue so it's better if we crash. we'll
 400   // only check this once per GC anyway, so it won't be a performance
 401   // issue in any way.
 402   guarantee(_saved_index == _index,
 403             err_msg("saved index: %d index: %d", _saved_index, _index));
 404   _saved_index = -1;
 405 }
 406 
 407 CMRootRegions::CMRootRegions() :
 408   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
 409   _should_abort(false),  _next_survivor(NULL) { }
 410 
 411 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
 412   _young_list = g1h->young_list();
 413   _cm = cm;
 414 }
 415 
 416 void CMRootRegions::prepare_for_scan() {
 417   assert(!scan_in_progress(), "pre-condition");
 418 
 419   // Currently, only survivors can be root regions.
 420   assert(_next_survivor == NULL, "pre-condition");
 421   _next_survivor = _young_list->first_survivor_region();
 422   _scan_in_progress = (_next_survivor != NULL);
 423   _should_abort = false;
 424 }
 425 
 426 HeapRegion* CMRootRegions::claim_next() {
 427   if (_should_abort) {
 428     // If someone has set the should_abort flag, we return NULL to
 429     // force the caller to bail out of their loop.
 430     return NULL;
 431   }
 432 
 433   // Currently, only survivors can be root regions.
 434   HeapRegion* res = _next_survivor;
 435   if (res != NULL) {
 436     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 437     // Read it again in case it changed while we were waiting for the lock.
 438     res = _next_survivor;
 439     if (res != NULL) {
 440       if (res == _young_list->last_survivor_region()) {
 441         // We just claimed the last survivor so store NULL to indicate
 442         // that we're done.
 443         _next_survivor = NULL;
 444       } else {
 445         _next_survivor = res->get_next_young_region();
 446       }
 447     } else {
 448       // Someone else claimed the last survivor while we were trying
 449       // to take the lock so nothing else to do.
 450     }
 451   }
 452   assert(res == NULL || res->is_survivor(), "post-condition");
 453 
 454   return res;
 455 }
 456 
 457 void CMRootRegions::scan_finished() {
 458   assert(scan_in_progress(), "pre-condition");
 459 
 460   // Currently, only survivors can be root regions.
 461   if (!_should_abort) {
 462     assert(_next_survivor == NULL, "we should have claimed all survivors");
 463   }
 464   _next_survivor = NULL;
 465 
 466   {
 467     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 468     _scan_in_progress = false;
 469     RootRegionScan_lock->notify_all();
 470   }
 471 }
 472 
 473 bool CMRootRegions::wait_until_scan_finished() {
 474   if (!scan_in_progress()) return false;
 475 
 476   {
 477     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 478     while (scan_in_progress()) {
 479       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 480     }
 481   }
 482   return true;
 483 }
 484 
 485 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 486 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 487 #endif // _MSC_VER
 488 
 489 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 490   return MAX2((n_par_threads + 2) / 4, 1U);
 491 }
 492 
 493 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
 494   _g1h(g1h),
 495   _markBitMap1(),
 496   _markBitMap2(),
 497   _parallel_marking_threads(0),
 498   _max_parallel_marking_threads(0),
 499   _sleep_factor(0.0),
 500   _marking_task_overhead(1.0),
 501   _cleanup_sleep_factor(0.0),
 502   _cleanup_task_overhead(1.0),
 503   _cleanup_list("Cleanup List"),
 504   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
 505   _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
 506             CardTableModRefBS::card_shift,
 507             false /* in_resource_area*/),
 508 
 509   _prevMarkBitMap(&_markBitMap1),
 510   _nextMarkBitMap(&_markBitMap2),
 511 
 512   _markStack(this),
 513   // _finger set in set_non_marking_state
 514 
 515   _max_worker_id(ParallelGCThreads),
 516   // _active_tasks set in set_non_marking_state
 517   // _tasks set inside the constructor
 518   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
 519   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 520 
 521   _has_overflown(false),
 522   _concurrent(false),
 523   _has_aborted(false),
 524   _restart_for_overflow(false),
 525   _concurrent_marking_in_progress(false),
 526 
 527   // _verbose_level set below
 528 
 529   _init_times(),
 530   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 531   _cleanup_times(),
 532   _total_counting_time(0.0),
 533   _total_rs_scrub_time(0.0),
 534 
 535   _parallel_workers(NULL),
 536 
 537   _count_card_bitmaps(NULL),
 538   _count_marked_bytes(NULL),
 539   _completed_initialization(false) {
 540   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 541   if (verbose_level < no_verbose) {
 542     verbose_level = no_verbose;
 543   }
 544   if (verbose_level > high_verbose) {
 545     verbose_level = high_verbose;
 546   }
 547   _verbose_level = verbose_level;
 548 
 549   if (verbose_low()) {
 550     gclog_or_tty->print_cr("[global] init, heap start = " PTR_FORMAT ", "
 551                            "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
 552   }
 553 
 554   _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 555   _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 556 
 557   // Create & start a ConcurrentMark thread.
 558   _cmThread = new ConcurrentMarkThread(this);
 559   assert(cmThread() != NULL, "CM Thread should have been created");
 560   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
 561   if (_cmThread->osthread() == NULL) {
 562       vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 563   }
 564 
 565   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 566   assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
 567   assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
 568 
 569   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 570   satb_qs.set_buffer_size(G1SATBBufferSize);
 571 
 572   _root_regions.init(_g1h, this);
 573 
 574   if (ConcGCThreads > ParallelGCThreads) {
 575     warning("Can't have more ConcGCThreads (%u) "
 576             "than ParallelGCThreads (%u).",
 577             ConcGCThreads, ParallelGCThreads);
 578     return;
 579   }
 580   if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
 581     // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
 582     // if both are set
 583     _sleep_factor             = 0.0;
 584     _marking_task_overhead    = 1.0;
 585   } else if (G1MarkingOverheadPercent > 0) {
 586     // We will calculate the number of parallel marking threads based
 587     // on a target overhead with respect to the soft real-time goal
 588     double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
 589     double overall_cm_overhead =
 590       (double) MaxGCPauseMillis * marking_overhead /
 591       (double) GCPauseIntervalMillis;
 592     double cpu_ratio = 1.0 / (double) os::processor_count();
 593     double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
 594     double marking_task_overhead =
 595       overall_cm_overhead / marking_thread_num *
 596                                               (double) os::processor_count();
 597     double sleep_factor =
 598                        (1.0 - marking_task_overhead) / marking_task_overhead;
 599 
 600     FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
 601     _sleep_factor             = sleep_factor;
 602     _marking_task_overhead    = marking_task_overhead;
 603   } else {
 604     // Calculate the number of parallel marking threads by scaling
 605     // the number of parallel GC threads.
 606     uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
 607     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 608     _sleep_factor             = 0.0;
 609     _marking_task_overhead    = 1.0;
 610   }
 611 
 612   assert(ConcGCThreads > 0, "Should have been set");
 613   _parallel_marking_threads = ConcGCThreads;
 614   _max_parallel_marking_threads = _parallel_marking_threads;
 615 
 616   if (parallel_marking_threads() > 1) {
 617     _cleanup_task_overhead = 1.0;
 618   } else {
 619     _cleanup_task_overhead = marking_task_overhead();
 620   }
 621   _cleanup_sleep_factor =
 622                    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
 623 
 624 #if 0
 625   gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
 626   gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
 627   gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
 628   gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
 629   gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
 630 #endif
 631 
 632   _parallel_workers = new WorkGang("G1 Marker",
 633        _max_parallel_marking_threads, false, true);
 634   if (_parallel_workers == NULL) {
 635     vm_exit_during_initialization("Failed necessary allocation.");
 636   } else {
 637     _parallel_workers->initialize_workers();
 638   }
 639 
 640   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 641     size_t mark_stack_size =
 642       MIN2(MarkStackSizeMax,
 643           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
 644     // Verify that the calculated value for MarkStackSize is in range.
 645     // It would be nice to use the private utility routine from Arguments.
 646     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 647       warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 648               "must be between 1 and " SIZE_FORMAT,
 649               mark_stack_size, MarkStackSizeMax);
 650       return;
 651     }
 652     FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
 653   } else {
 654     // Verify MarkStackSize is in range.
 655     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 656       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 657         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 658           warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
 659                   "must be between 1 and " SIZE_FORMAT,
 660                   MarkStackSize, MarkStackSizeMax);
 661           return;
 662         }
 663       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 664         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 665           warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
 666                   " or for MarkStackSizeMax (" SIZE_FORMAT ")",
 667                   MarkStackSize, MarkStackSizeMax);
 668           return;
 669         }
 670       }
 671     }
 672   }
 673 
 674   if (!_markStack.allocate(MarkStackSize)) {
 675     warning("Failed to allocate CM marking stack");
 676     return;
 677   }
 678 
 679   _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
 680   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
 681 
 682   _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
 683   _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
 684 
 685   BitMap::idx_t card_bm_size = _card_bm.size();
 686 
 687   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 688   _active_tasks = _max_worker_id;
 689 
 690   uint max_regions = _g1h->max_regions();
 691   for (uint i = 0; i < _max_worker_id; ++i) {
 692     CMTaskQueue* task_queue = new CMTaskQueue();
 693     task_queue->initialize();
 694     _task_queues->register_queue(i, task_queue);
 695 
 696     _count_card_bitmaps[i] = BitMap(card_bm_size, false);
 697     _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
 698 
 699     _tasks[i] = new CMTask(i, this,
 700                            _count_marked_bytes[i],
 701                            &_count_card_bitmaps[i],
 702                            task_queue, _task_queues);
 703 
 704     _accum_task_vtime[i] = 0.0;
 705   }
 706 
 707   // Calculate the card number for the bottom of the heap. Used
 708   // in biasing indexes into the accounting card bitmaps.
 709   _heap_bottom_card_num =
 710     intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
 711                                 CardTableModRefBS::card_shift);
 712 
 713   // Clear all the liveness counting data
 714   clear_all_count_data();
 715 
 716   // so that the call below can read a sensible value
 717   _heap_start = g1h->reserved_region().start();
 718   set_non_marking_state();
 719   _completed_initialization = true;
 720 }
 721 
 722 void ConcurrentMark::reset() {
 723   // Starting values for these two. This should be called in a STW
 724   // phase.
 725   MemRegion reserved = _g1h->g1_reserved();
 726   _heap_start = reserved.start();
 727   _heap_end   = reserved.end();
 728 
 729   // Separated the asserts so that we know which one fires.
 730   assert(_heap_start != NULL, "heap bounds should look ok");
 731   assert(_heap_end != NULL, "heap bounds should look ok");
 732   assert(_heap_start < _heap_end, "heap bounds should look ok");
 733 
 734   // Reset all the marking data structures and any necessary flags
 735   reset_marking_state();
 736 
 737   if (verbose_low()) {
 738     gclog_or_tty->print_cr("[global] resetting");
 739   }
 740 
 741   // We do reset all of them, since different phases will use
 742   // different number of active threads. So, it's easiest to have all
 743   // of them ready.
 744   for (uint i = 0; i < _max_worker_id; ++i) {
 745     _tasks[i]->reset(_nextMarkBitMap);
 746   }
 747 
 748   // we need this to make sure that the flag is on during the evac
 749   // pause with initial mark piggy-backed
 750   set_concurrent_marking_in_progress();
 751 }
 752 
 753 
 754 void ConcurrentMark::reset_marking_state(bool clear_overflow) {
 755   _markStack.set_should_expand();
 756   _markStack.setEmpty();        // Also clears the _markStack overflow flag
 757   if (clear_overflow) {
 758     clear_has_overflown();
 759   } else {
 760     assert(has_overflown(), "pre-condition");
 761   }
 762   _finger = _heap_start;
 763 
 764   for (uint i = 0; i < _max_worker_id; ++i) {
 765     CMTaskQueue* queue = _task_queues->queue(i);
 766     queue->set_empty();
 767   }
 768 }
 769 
 770 void ConcurrentMark::set_concurrency(uint active_tasks) {
 771   assert(active_tasks <= _max_worker_id, "we should not have more");
 772 
 773   _active_tasks = active_tasks;
 774   // Need to update the three data structures below according to the
 775   // number of active threads for this phase.
 776   _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
 777   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 778   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 779 }
 780 
 781 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 782   set_concurrency(active_tasks);
 783 
 784   _concurrent = concurrent;
 785   // We propagate this to all tasks, not just the active ones.
 786   for (uint i = 0; i < _max_worker_id; ++i)
 787     _tasks[i]->set_concurrent(concurrent);
 788 
 789   if (concurrent) {
 790     set_concurrent_marking_in_progress();
 791   } else {
 792     // We currently assume that the concurrent flag has been set to
 793     // false before we start remark. At this point we should also be
 794     // in a STW phase.
 795     assert(!concurrent_marking_in_progress(), "invariant");
 796     assert(out_of_regions(),
 797            err_msg("only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 798                    p2i(_finger), p2i(_heap_end)));
 799   }
 800 }
 801 
 802 void ConcurrentMark::set_non_marking_state() {
 803   // We set the global marking state to some default values when we're
 804   // not doing marking.
 805   reset_marking_state();
 806   _active_tasks = 0;
 807   clear_concurrent_marking_in_progress();
 808 }
 809 
 810 ConcurrentMark::~ConcurrentMark() {
 811   // The ConcurrentMark instance is never freed.
 812   ShouldNotReachHere();
 813 }
 814 
 815 void ConcurrentMark::clearNextBitmap() {
 816   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 817 
 818   // Make sure that the concurrent mark thread looks to still be in
 819   // the current cycle.
 820   guarantee(cmThread()->during_cycle(), "invariant");
 821 
 822   // We are finishing up the current cycle by clearing the next
 823   // marking bitmap and getting it ready for the next cycle. During
 824   // this time no other cycle can start. So, let's make sure that this
 825   // is the case.
 826   guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
 827 
 828   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
 829   ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
 830   _parallel_workers->run_task(&task);
 831 
 832   // Clear the liveness counting data. If the marking has been aborted, the abort()
 833   // call already did that.
 834   if (cl.complete()) {
 835     clear_all_count_data();
 836   }
 837 
 838   // Repeat the asserts from above.
 839   guarantee(cmThread()->during_cycle(), "invariant");
 840   guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
 841 }
 842 
 843 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 844   CMBitMap* _bitmap;
 845   bool _error;
 846  public:
 847   CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
 848   }
 849 
 850   virtual bool doHeapRegion(HeapRegion* r) {
 851     // This closure can be called concurrently to the mutator, so we must make sure
 852     // that the result of the getNextMarkedWordAddress() call is compared to the
 853     // value passed to it as limit to detect any found bits.
 854     // We can use the region's orig_end() for the limit and the comparison value
 855     // as it always contains the "real" end of the region that never changes and
 856     // has no side effects.
 857     // Due to the latter, there can also be no problem with the compiler generating
 858     // reloads of the orig_end() call.
 859     HeapWord* end = r->orig_end();
 860     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
 861   }
 862 };
 863 
 864 bool ConcurrentMark::nextMarkBitmapIsClear() {
 865   CheckBitmapClearHRClosure cl(_nextMarkBitMap);
 866   _g1h->heap_region_iterate(&cl);
 867   return cl.complete();
 868 }
 869 
 870 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 871 public:
 872   bool doHeapRegion(HeapRegion* r) {
 873     if (!r->is_continues_humongous()) {
 874       r->note_start_of_marking();
 875     }
 876     return false;
 877   }
 878 };
 879 
 880 void ConcurrentMark::checkpointRootsInitialPre() {
 881   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 882   G1CollectorPolicy* g1p = g1h->g1_policy();
 883 
 884   _has_aborted = false;
 885 
 886   // Initialize marking structures. This has to be done in a STW phase.
 887   reset();
 888 
 889   // For each region note start of marking.
 890   NoteStartOfMarkHRClosure startcl;
 891   g1h->heap_region_iterate(&startcl);
 892 }
 893 
 894 
 895 void ConcurrentMark::checkpointRootsInitialPost() {
 896   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 897 
 898   // If we force an overflow during remark, the remark operation will
 899   // actually abort and we'll restart concurrent marking. If we always
 900   // force an overflow during remark we'll never actually complete the
 901   // marking phase. So, we initialize this here, at the start of the
 902   // cycle, so that at the remaining overflow number will decrease at
 903   // every remark and we'll eventually not need to cause one.
 904   force_overflow_stw()->init();
 905 
 906   // Start Concurrent Marking weak-reference discovery.
 907   ReferenceProcessor* rp = g1h->ref_processor_cm();
 908   // enable ("weak") refs discovery
 909   rp->enable_discovery();
 910   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 911 
 912   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 913   // This is the start of  the marking cycle, we're expected all
 914   // threads to have SATB queues with active set to false.
 915   satb_mq_set.set_active_all_threads(true, /* new active value */
 916                                      false /* expected_active */);
 917 
 918   _root_regions.prepare_for_scan();
 919 
 920   // update_g1_committed() will be called at the end of an evac pause
 921   // when marking is on. So, it's also called at the end of the
 922   // initial-mark pause to update the heap end, if the heap expands
 923   // during it. No need to call it here.
 924 }
 925 
 926 /*
 927  * Notice that in the next two methods, we actually leave the STS
 928  * during the barrier sync and join it immediately afterwards. If we
 929  * do not do this, the following deadlock can occur: one thread could
 930  * be in the barrier sync code, waiting for the other thread to also
 931  * sync up, whereas another one could be trying to yield, while also
 932  * waiting for the other threads to sync up too.
 933  *
 934  * Note, however, that this code is also used during remark and in
 935  * this case we should not attempt to leave / enter the STS, otherwise
 936  * we'll either hit an assert (debug / fastdebug) or deadlock
 937  * (product). So we should only leave / enter the STS if we are
 938  * operating concurrently.
 939  *
 940  * Because the thread that does the sync barrier has left the STS, it
 941  * is possible to be suspended for a Full GC or an evacuation pause
 942  * could occur. This is actually safe, since the entering the sync
 943  * barrier is one of the last things do_marking_step() does, and it
 944  * doesn't manipulate any data structures afterwards.
 945  */
 946 
 947 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 948   bool barrier_aborted;
 949 
 950   if (verbose_low()) {
 951     gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
 952   }
 953 
 954   {
 955     SuspendibleThreadSetLeaver sts_leave(concurrent());
 956     barrier_aborted = !_first_overflow_barrier_sync.enter();
 957   }
 958 
 959   // at this point everyone should have synced up and not be doing any
 960   // more work
 961 
 962   if (verbose_low()) {
 963     if (barrier_aborted) {
 964       gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
 965     } else {
 966       gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
 967     }
 968   }
 969 
 970   if (barrier_aborted) {
 971     // If the barrier aborted we ignore the overflow condition and
 972     // just abort the whole marking phase as quickly as possible.
 973     return;
 974   }
 975 
 976   // If we're executing the concurrent phase of marking, reset the marking
 977   // state; otherwise the marking state is reset after reference processing,
 978   // during the remark pause.
 979   // If we reset here as a result of an overflow during the remark we will
 980   // see assertion failures from any subsequent set_concurrency_and_phase()
 981   // calls.
 982   if (concurrent()) {
 983     // let the task associated with with worker 0 do this
 984     if (worker_id == 0) {
 985       // task 0 is responsible for clearing the global data structures
 986       // We should be here because of an overflow. During STW we should
 987       // not clear the overflow flag since we rely on it being true when
 988       // we exit this method to abort the pause and restart concurrent
 989       // marking.
 990       reset_marking_state(true /* clear_overflow */);
 991       force_overflow()->update();
 992 
 993       if (G1Log::fine()) {
 994         gclog_or_tty->gclog_stamp();
 995         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
 996       }
 997     }
 998   }
 999 
1000   // after this, each task should reset its own data structures then
1001   // then go into the second barrier
1002 }
1003 
1004 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
1005   bool barrier_aborted;
1006 
1007   if (verbose_low()) {
1008     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1009   }
1010 
1011   {
1012     SuspendibleThreadSetLeaver sts_leave(concurrent());
1013     barrier_aborted = !_second_overflow_barrier_sync.enter();
1014   }
1015 
1016   // at this point everything should be re-initialized and ready to go
1017 
1018   if (verbose_low()) {
1019     if (barrier_aborted) {
1020       gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
1021     } else {
1022       gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
1023     }
1024   }
1025 }
1026 
1027 #ifndef PRODUCT
1028 void ForceOverflowSettings::init() {
1029   _num_remaining = G1ConcMarkForceOverflow;
1030   _force = false;
1031   update();
1032 }
1033 
1034 void ForceOverflowSettings::update() {
1035   if (_num_remaining > 0) {
1036     _num_remaining -= 1;
1037     _force = true;
1038   } else {
1039     _force = false;
1040   }
1041 }
1042 
1043 bool ForceOverflowSettings::should_force() {
1044   if (_force) {
1045     _force = false;
1046     return true;
1047   } else {
1048     return false;
1049   }
1050 }
1051 #endif // !PRODUCT
1052 
1053 class CMConcurrentMarkingTask: public AbstractGangTask {
1054 private:
1055   ConcurrentMark*       _cm;
1056   ConcurrentMarkThread* _cmt;
1057 
1058 public:
1059   void work(uint worker_id) {
1060     assert(Thread::current()->is_ConcurrentGC_thread(),
1061            "this should only be done by a conc GC thread");
1062     ResourceMark rm;
1063 
1064     double start_vtime = os::elapsedVTime();
1065 
1066     {
1067       SuspendibleThreadSetJoiner sts_join;
1068 
1069       assert(worker_id < _cm->active_tasks(), "invariant");
1070       CMTask* the_task = _cm->task(worker_id);
1071       the_task->record_start_time();
1072       if (!_cm->has_aborted()) {
1073         do {
1074           double start_vtime_sec = os::elapsedVTime();
1075           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1076 
1077           the_task->do_marking_step(mark_step_duration_ms,
1078                                     true  /* do_termination */,
1079                                     false /* is_serial*/);
1080 
1081           double end_vtime_sec = os::elapsedVTime();
1082           double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
1083           _cm->clear_has_overflown();
1084 
1085           _cm->do_yield_check(worker_id);
1086 
1087           jlong sleep_time_ms;
1088           if (!_cm->has_aborted() && the_task->has_aborted()) {
1089             sleep_time_ms =
1090               (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
1091             {
1092               SuspendibleThreadSetLeaver sts_leave;
1093               os::sleep(Thread::current(), sleep_time_ms, false);
1094             }
1095           }
1096         } while (!_cm->has_aborted() && the_task->has_aborted());
1097       }
1098       the_task->record_end_time();
1099       guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1100     }
1101 
1102     double end_vtime = os::elapsedVTime();
1103     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
1104   }
1105 
1106   CMConcurrentMarkingTask(ConcurrentMark* cm,
1107                           ConcurrentMarkThread* cmt) :
1108       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1109 
1110   ~CMConcurrentMarkingTask() { }
1111 };
1112 
1113 // Calculates the number of active workers for a concurrent
1114 // phase.
1115 uint ConcurrentMark::calc_parallel_marking_threads() {
1116   uint n_conc_workers = 0;
1117   if (!UseDynamicNumberOfGCThreads ||
1118       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
1119        !ForceDynamicNumberOfGCThreads)) {
1120     n_conc_workers = max_parallel_marking_threads();
1121   } else {
1122     n_conc_workers =
1123       AdaptiveSizePolicy::calc_default_active_workers(
1124                                    max_parallel_marking_threads(),
1125                                    1, /* Minimum workers */
1126                                    parallel_marking_threads(),
1127                                    Threads::number_of_non_daemon_threads());
1128     // Don't scale down "n_conc_workers" by scale_parallel_threads() because
1129     // that scaling has already gone into "_max_parallel_marking_threads".
1130   }
1131   assert(n_conc_workers > 0, "Always need at least 1");
1132   return n_conc_workers;
1133 }
1134 
1135 void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
1136   // Currently, only survivors can be root regions.
1137   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
1138   G1RootRegionScanClosure cl(_g1h, this, worker_id);
1139 
1140   const uintx interval = PrefetchScanIntervalInBytes;
1141   HeapWord* curr = hr->bottom();
1142   const HeapWord* end = hr->top();
1143   while (curr < end) {
1144     Prefetch::read(curr, interval);
1145     oop obj = oop(curr);
1146     int size = obj->oop_iterate_size(&cl);
1147     assert(size == obj->size(), "sanity");
1148     curr += size;
1149   }
1150 }
1151 
1152 class CMRootRegionScanTask : public AbstractGangTask {
1153 private:
1154   ConcurrentMark* _cm;
1155 
1156 public:
1157   CMRootRegionScanTask(ConcurrentMark* cm) :
1158     AbstractGangTask("Root Region Scan"), _cm(cm) { }
1159 
1160   void work(uint worker_id) {
1161     assert(Thread::current()->is_ConcurrentGC_thread(),
1162            "this should only be done by a conc GC thread");
1163 
1164     CMRootRegions* root_regions = _cm->root_regions();
1165     HeapRegion* hr = root_regions->claim_next();
1166     while (hr != NULL) {
1167       _cm->scanRootRegion(hr, worker_id);
1168       hr = root_regions->claim_next();
1169     }
1170   }
1171 };
1172 
1173 void ConcurrentMark::scanRootRegions() {
1174   double scan_start = os::elapsedTime();
1175 
1176   // Start of concurrent marking.
1177   ClassLoaderDataGraph::clear_claimed_marks();
1178 
1179   // scan_in_progress() will have been set to true only if there was
1180   // at least one root region to scan. So, if it's false, we
1181   // should not attempt to do any further work.
1182   if (root_regions()->scan_in_progress()) {
1183     if (G1Log::fine()) {
1184       gclog_or_tty->gclog_stamp();
1185       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
1186     }
1187 
1188     _parallel_marking_threads = calc_parallel_marking_threads();
1189     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1190            "Maximum number of marking threads exceeded");
1191     uint active_workers = MAX2(1U, parallel_marking_threads());
1192 
1193     CMRootRegionScanTask task(this);
1194     _parallel_workers->set_active_workers(active_workers);
1195     _parallel_workers->run_task(&task);
1196 
1197     if (G1Log::fine()) {
1198       gclog_or_tty->gclog_stamp();
1199       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
1200     }
1201 
1202     // It's possible that has_aborted() is true here without actually
1203     // aborting the survivor scan earlier. This is OK as it's
1204     // mainly used for sanity checking.
1205     root_regions()->scan_finished();
1206   }
1207 }
1208 
1209 void ConcurrentMark::markFromRoots() {
1210   // we might be tempted to assert that:
1211   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1212   //        "inconsistent argument?");
1213   // However that wouldn't be right, because it's possible that
1214   // a safepoint is indeed in progress as a younger generation
1215   // stop-the-world GC happens even as we mark in this generation.
1216 
1217   _restart_for_overflow = false;
1218   force_overflow_conc()->init();
1219 
1220   // _g1h has _n_par_threads
1221   _parallel_marking_threads = calc_parallel_marking_threads();
1222   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1223     "Maximum number of marking threads exceeded");
1224 
1225   uint active_workers = MAX2(1U, parallel_marking_threads());
1226   assert(active_workers > 0, "Should have been set");
1227 
1228   // Parallel task terminator is set in "set_concurrency_and_phase()"
1229   set_concurrency_and_phase(active_workers, true /* concurrent */);
1230 
1231   CMConcurrentMarkingTask markingTask(this, cmThread());
1232   _parallel_workers->set_active_workers(active_workers);
1233   _parallel_workers->run_task(&markingTask);
1234   print_stats();
1235 }
1236 
1237 // Helper class to get rid of some boilerplate code.
1238 class G1CMTraceTime : public StackObj {
1239   GCTraceTimeImpl _gc_trace_time;
1240   static bool doit_and_prepend(bool doit) {
1241     if (doit) {
1242       gclog_or_tty->put(' ');
1243     }
1244     return doit;
1245   }
1246 
1247  public:
1248   G1CMTraceTime(const char* title, bool doit)
1249     : _gc_trace_time(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm()) {
1250   }
1251 };
1252 
1253 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1254   // world is stopped at this checkpoint
1255   assert(SafepointSynchronize::is_at_safepoint(),
1256          "world should be stopped");
1257 
1258   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1259 
1260   // If a full collection has happened, we shouldn't do this.
1261   if (has_aborted()) {
1262     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1263     return;
1264   }
1265 
1266   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1267 
1268   if (VerifyDuringGC) {
1269     HandleMark hm;  // handle scope
1270     g1h->prepare_for_verify();
1271     Universe::verify(VerifyOption_G1UsePrevMarking,
1272                      " VerifyDuringGC:(before)");
1273   }
1274   g1h->check_bitmaps("Remark Start");
1275 
1276   G1CollectorPolicy* g1p = g1h->g1_policy();
1277   g1p->record_concurrent_mark_remark_start();
1278 
1279   double start = os::elapsedTime();
1280 
1281   checkpointRootsFinalWork();
1282 
1283   double mark_work_end = os::elapsedTime();
1284 
1285   weakRefsWork(clear_all_soft_refs);
1286 
1287   if (has_overflown()) {
1288     // Oops.  We overflowed.  Restart concurrent marking.
1289     _restart_for_overflow = true;
1290     if (G1TraceMarkStackOverflow) {
1291       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1292     }
1293 
1294     // Verify the heap w.r.t. the previous marking bitmap.
1295     if (VerifyDuringGC) {
1296       HandleMark hm;  // handle scope
1297       g1h->prepare_for_verify();
1298       Universe::verify(VerifyOption_G1UsePrevMarking,
1299                        " VerifyDuringGC:(overflow)");
1300     }
1301 
1302     // Clear the marking state because we will be restarting
1303     // marking due to overflowing the global mark stack.
1304     reset_marking_state();
1305   } else {
1306     {
1307       G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
1308 
1309       // Aggregate the per-task counting data that we have accumulated
1310       // while marking.
1311       aggregate_count_data();
1312     }
1313 
1314     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1315     // We're done with marking.
1316     // This is the end of  the marking cycle, we're expected all
1317     // threads to have SATB queues with active set to true.
1318     satb_mq_set.set_active_all_threads(false, /* new active value */
1319                                        true /* expected_active */);
1320 
1321     if (VerifyDuringGC) {
1322       HandleMark hm;  // handle scope
1323       g1h->prepare_for_verify();
1324       Universe::verify(VerifyOption_G1UseNextMarking,
1325                        " VerifyDuringGC:(after)");
1326     }
1327     g1h->check_bitmaps("Remark End");
1328     assert(!restart_for_overflow(), "sanity");
1329     // Completely reset the marking state since marking completed
1330     set_non_marking_state();
1331   }
1332 
1333   // Expand the marking stack, if we have to and if we can.
1334   if (_markStack.should_expand()) {
1335     _markStack.expand();
1336   }
1337 
1338   // Statistics
1339   double now = os::elapsedTime();
1340   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1341   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1342   _remark_times.add((now - start) * 1000.0);
1343 
1344   g1p->record_concurrent_mark_remark_end();
1345 
1346   G1CMIsAliveClosure is_alive(g1h);
1347   g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1348 }
1349 
1350 // Base class of the closures that finalize and verify the
1351 // liveness counting data.
1352 class CMCountDataClosureBase: public HeapRegionClosure {
1353 protected:
1354   G1CollectedHeap* _g1h;
1355   ConcurrentMark* _cm;
1356   CardTableModRefBS* _ct_bs;
1357 
1358   BitMap* _region_bm;
1359   BitMap* _card_bm;
1360 
1361   // Takes a region that's not empty (i.e., it has at least one
1362   // live object in it and sets its corresponding bit on the region
1363   // bitmap to 1. If the region is "starts humongous" it will also set
1364   // to 1 the bits on the region bitmap that correspond to its
1365   // associated "continues humongous" regions.
1366   void set_bit_for_region(HeapRegion* hr) {
1367     assert(!hr->is_continues_humongous(), "should have filtered those out");
1368 
1369     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1370     if (!hr->is_starts_humongous()) {
1371       // Normal (non-humongous) case: just set the bit.
1372       _region_bm->par_at_put(index, true);
1373     } else {
1374       // Starts humongous case: calculate how many regions are part of
1375       // this humongous region and then set the bit range.
1376       BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
1377       _region_bm->par_at_put_range(index, end_index, true);
1378     }
1379   }
1380 
1381 public:
1382   CMCountDataClosureBase(G1CollectedHeap* g1h,
1383                          BitMap* region_bm, BitMap* card_bm):
1384     _g1h(g1h), _cm(g1h->concurrent_mark()),
1385     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
1386     _region_bm(region_bm), _card_bm(card_bm) { }
1387 };
1388 
1389 // Closure that calculates the # live objects per region. Used
1390 // for verification purposes during the cleanup pause.
1391 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1392   CMBitMapRO* _bm;
1393   size_t _region_marked_bytes;
1394 
1395 public:
1396   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1397                          BitMap* region_bm, BitMap* card_bm) :
1398     CMCountDataClosureBase(g1h, region_bm, card_bm),
1399     _bm(bm), _region_marked_bytes(0) { }
1400 
1401   bool doHeapRegion(HeapRegion* hr) {
1402 
1403     if (hr->is_continues_humongous()) {
1404       // We will ignore these here and process them when their
1405       // associated "starts humongous" region is processed (see
1406       // set_bit_for_heap_region()). Note that we cannot rely on their
1407       // associated "starts humongous" region to have their bit set to
1408       // 1 since, due to the region chunking in the parallel region
1409       // iteration, a "continues humongous" region might be visited
1410       // before its associated "starts humongous".
1411       return false;
1412     }
1413 
1414     HeapWord* ntams = hr->next_top_at_mark_start();
1415     HeapWord* start = hr->bottom();
1416 
1417     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1418            err_msg("Preconditions not met - "
1419                    "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
1420                    p2i(start), p2i(ntams), p2i(hr->end())));
1421 
1422     // Find the first marked object at or after "start".
1423     start = _bm->getNextMarkedWordAddress(start, ntams);
1424 
1425     size_t marked_bytes = 0;
1426 
1427     while (start < ntams) {
1428       oop obj = oop(start);
1429       int obj_sz = obj->size();
1430       HeapWord* obj_end = start + obj_sz;
1431 
1432       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1433       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1434 
1435       // Note: if we're looking at the last region in heap - obj_end
1436       // could be actually just beyond the end of the heap; end_idx
1437       // will then correspond to a (non-existent) card that is also
1438       // just beyond the heap.
1439       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1440         // end of object is not card aligned - increment to cover
1441         // all the cards spanned by the object
1442         end_idx += 1;
1443       }
1444 
1445       // Set the bits in the card BM for the cards spanned by this object.
1446       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1447 
1448       // Add the size of this object to the number of marked bytes.
1449       marked_bytes += (size_t)obj_sz * HeapWordSize;
1450 
1451       // Find the next marked object after this one.
1452       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1453     }
1454 
1455     // Mark the allocated-since-marking portion...
1456     HeapWord* top = hr->top();
1457     if (ntams < top) {
1458       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1459       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1460 
1461       // Note: if we're looking at the last region in heap - top
1462       // could be actually just beyond the end of the heap; end_idx
1463       // will then correspond to a (non-existent) card that is also
1464       // just beyond the heap.
1465       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1466         // end of object is not card aligned - increment to cover
1467         // all the cards spanned by the object
1468         end_idx += 1;
1469       }
1470       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1471 
1472       // This definitely means the region has live objects.
1473       set_bit_for_region(hr);
1474     }
1475 
1476     // Update the live region bitmap.
1477     if (marked_bytes > 0) {
1478       set_bit_for_region(hr);
1479     }
1480 
1481     // Set the marked bytes for the current region so that
1482     // it can be queried by a calling verification routine
1483     _region_marked_bytes = marked_bytes;
1484 
1485     return false;
1486   }
1487 
1488   size_t region_marked_bytes() const { return _region_marked_bytes; }
1489 };
1490 
1491 // Heap region closure used for verifying the counting data
1492 // that was accumulated concurrently and aggregated during
1493 // the remark pause. This closure is applied to the heap
1494 // regions during the STW cleanup pause.
1495 
1496 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1497   G1CollectedHeap* _g1h;
1498   ConcurrentMark* _cm;
1499   CalcLiveObjectsClosure _calc_cl;
1500   BitMap* _region_bm;   // Region BM to be verified
1501   BitMap* _card_bm;     // Card BM to be verified
1502   bool _verbose;        // verbose output?
1503 
1504   BitMap* _exp_region_bm; // Expected Region BM values
1505   BitMap* _exp_card_bm;   // Expected card BM values
1506 
1507   int _failures;
1508 
1509 public:
1510   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1511                                 BitMap* region_bm,
1512                                 BitMap* card_bm,
1513                                 BitMap* exp_region_bm,
1514                                 BitMap* exp_card_bm,
1515                                 bool verbose) :
1516     _g1h(g1h), _cm(g1h->concurrent_mark()),
1517     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1518     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1519     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1520     _failures(0) { }
1521 
1522   int failures() const { return _failures; }
1523 
1524   bool doHeapRegion(HeapRegion* hr) {
1525     if (hr->is_continues_humongous()) {
1526       // We will ignore these here and process them when their
1527       // associated "starts humongous" region is processed (see
1528       // set_bit_for_heap_region()). Note that we cannot rely on their
1529       // associated "starts humongous" region to have their bit set to
1530       // 1 since, due to the region chunking in the parallel region
1531       // iteration, a "continues humongous" region might be visited
1532       // before its associated "starts humongous".
1533       return false;
1534     }
1535 
1536     int failures = 0;
1537 
1538     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1539     // this region and set the corresponding bits in the expected region
1540     // and card bitmaps.
1541     bool res = _calc_cl.doHeapRegion(hr);
1542     assert(res == false, "should be continuing");
1543 
1544     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1545                     Mutex::_no_safepoint_check_flag);
1546 
1547     // Verify the marked bytes for this region.
1548     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1549     size_t act_marked_bytes = hr->next_marked_bytes();
1550 
1551     // We're not OK if expected marked bytes > actual marked bytes. It means
1552     // we have missed accounting some objects during the actual marking.
1553     if (exp_marked_bytes > act_marked_bytes) {
1554       if (_verbose) {
1555         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1556                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1557                                hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
1558       }
1559       failures += 1;
1560     }
1561 
1562     // Verify the bit, for this region, in the actual and expected
1563     // (which was just calculated) region bit maps.
1564     // We're not OK if the bit in the calculated expected region
1565     // bitmap is set and the bit in the actual region bitmap is not.
1566     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1567 
1568     bool expected = _exp_region_bm->at(index);
1569     bool actual = _region_bm->at(index);
1570     if (expected && !actual) {
1571       if (_verbose) {
1572         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1573                                "expected: %s, actual: %s",
1574                                hr->hrm_index(),
1575                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1576       }
1577       failures += 1;
1578     }
1579 
1580     // Verify that the card bit maps for the cards spanned by the current
1581     // region match. We have an error if we have a set bit in the expected
1582     // bit map and the corresponding bit in the actual bitmap is not set.
1583 
1584     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1585     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1586 
1587     for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
1588       expected = _exp_card_bm->at(i);
1589       actual = _card_bm->at(i);
1590 
1591       if (expected && !actual) {
1592         if (_verbose) {
1593           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1594                                  "expected: %s, actual: %s",
1595                                  hr->hrm_index(), i,
1596                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1597         }
1598         failures += 1;
1599       }
1600     }
1601 
1602     if (failures > 0 && _verbose)  {
1603       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1604                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1605                              HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
1606                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1607     }
1608 
1609     _failures += failures;
1610 
1611     // We could stop iteration over the heap when we
1612     // find the first violating region by returning true.
1613     return false;
1614   }
1615 };
1616 
1617 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1618 protected:
1619   G1CollectedHeap* _g1h;
1620   ConcurrentMark* _cm;
1621   BitMap* _actual_region_bm;
1622   BitMap* _actual_card_bm;
1623 
1624   uint    _n_workers;
1625 
1626   BitMap* _expected_region_bm;
1627   BitMap* _expected_card_bm;
1628 
1629   int  _failures;
1630   bool _verbose;
1631 
1632   HeapRegionClaimer _hrclaimer;
1633 
1634 public:
1635   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1636                             BitMap* region_bm, BitMap* card_bm,
1637                             BitMap* expected_region_bm, BitMap* expected_card_bm)
1638     : AbstractGangTask("G1 verify final counting"),
1639       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1640       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1641       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1642       _failures(0), _verbose(false),
1643       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1644     assert(VerifyDuringGC, "don't call this otherwise");
1645     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1646     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1647 
1648     _verbose = _cm->verbose_medium();
1649   }
1650 
1651   void work(uint worker_id) {
1652     assert(worker_id < _n_workers, "invariant");
1653 
1654     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1655                                             _actual_region_bm, _actual_card_bm,
1656                                             _expected_region_bm,
1657                                             _expected_card_bm,
1658                                             _verbose);
1659 
1660     _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);
1661 
1662     Atomic::add(verify_cl.failures(), &_failures);
1663   }
1664 
1665   int failures() const { return _failures; }
1666 };
1667 
1668 // Closure that finalizes the liveness counting data.
1669 // Used during the cleanup pause.
1670 // Sets the bits corresponding to the interval [NTAMS, top]
1671 // (which contains the implicitly live objects) in the
1672 // card liveness bitmap. Also sets the bit for each region,
1673 // containing live data, in the region liveness bitmap.
1674 
1675 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1676  public:
1677   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1678                               BitMap* region_bm,
1679                               BitMap* card_bm) :
1680     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1681 
1682   bool doHeapRegion(HeapRegion* hr) {
1683 
1684     if (hr->is_continues_humongous()) {
1685       // We will ignore these here and process them when their
1686       // associated "starts humongous" region is processed (see
1687       // set_bit_for_heap_region()). Note that we cannot rely on their
1688       // associated "starts humongous" region to have their bit set to
1689       // 1 since, due to the region chunking in the parallel region
1690       // iteration, a "continues humongous" region might be visited
1691       // before its associated "starts humongous".
1692       return false;
1693     }
1694 
1695     HeapWord* ntams = hr->next_top_at_mark_start();
1696     HeapWord* top   = hr->top();
1697 
1698     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1699 
1700     // Mark the allocated-since-marking portion...
1701     if (ntams < top) {
1702       // This definitely means the region has live objects.
1703       set_bit_for_region(hr);
1704 
1705       // Now set the bits in the card bitmap for [ntams, top)
1706       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1707       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1708 
1709       // Note: if we're looking at the last region in heap - top
1710       // could be actually just beyond the end of the heap; end_idx
1711       // will then correspond to a (non-existent) card that is also
1712       // just beyond the heap.
1713       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1714         // end of object is not card aligned - increment to cover
1715         // all the cards spanned by the object
1716         end_idx += 1;
1717       }
1718 
1719       assert(end_idx <= _card_bm->size(),
1720              err_msg("oob: end_idx=  " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
1721                      end_idx, _card_bm->size()));
1722       assert(start_idx < _card_bm->size(),
1723              err_msg("oob: start_idx=  " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
1724                      start_idx, _card_bm->size()));
1725 
1726       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1727     }
1728 
1729     // Set the bit for the region if it contains live data
1730     if (hr->next_marked_bytes() > 0) {
1731       set_bit_for_region(hr);
1732     }
1733 
1734     return false;
1735   }
1736 };
1737 
1738 class G1ParFinalCountTask: public AbstractGangTask {
1739 protected:
1740   G1CollectedHeap* _g1h;
1741   ConcurrentMark* _cm;
1742   BitMap* _actual_region_bm;
1743   BitMap* _actual_card_bm;
1744 
1745   uint    _n_workers;
1746   HeapRegionClaimer _hrclaimer;
1747 
1748 public:
1749   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1750     : AbstractGangTask("G1 final counting"),
1751       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1752       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1753       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1754   }
1755 
1756   void work(uint worker_id) {
1757     assert(worker_id < _n_workers, "invariant");
1758 
1759     FinalCountDataUpdateClosure final_update_cl(_g1h,
1760                                                 _actual_region_bm,
1761                                                 _actual_card_bm);
1762 
1763     _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
1764   }
1765 };
1766 
1767 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1768   G1CollectedHeap* _g1;
1769   size_t _freed_bytes;
1770   FreeRegionList* _local_cleanup_list;
1771   HeapRegionSetCount _old_regions_removed;
1772   HeapRegionSetCount _humongous_regions_removed;
1773   HRRSCleanupTask* _hrrs_cleanup_task;
1774 
1775 public:
1776   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1777                              FreeRegionList* local_cleanup_list,
1778                              HRRSCleanupTask* hrrs_cleanup_task) :
1779     _g1(g1),
1780     _freed_bytes(0),
1781     _local_cleanup_list(local_cleanup_list),
1782     _old_regions_removed(),
1783     _humongous_regions_removed(),
1784     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1785 
1786   size_t freed_bytes() { return _freed_bytes; }
1787   const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1788   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1789 
1790   bool doHeapRegion(HeapRegion *hr) {
1791     if (hr->is_continues_humongous() || hr->is_archive()) {
1792       return false;
1793     }
1794     // We use a claim value of zero here because all regions
1795     // were claimed with value 1 in the FinalCount task.
1796     _g1->reset_gc_time_stamps(hr);
1797     hr->note_end_of_marking();
1798 
1799     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1800       _freed_bytes += hr->used();
1801       hr->set_containing_set(NULL);
1802       if (hr->is_humongous()) {
1803         assert(hr->is_starts_humongous(), "we should only see starts humongous");
1804         _humongous_regions_removed.increment(1u, hr->capacity());
1805         _g1->free_humongous_region(hr, _local_cleanup_list, true);
1806       } else {
1807         _old_regions_removed.increment(1u, hr->capacity());
1808         _g1->free_region(hr, _local_cleanup_list, true);
1809       }
1810     } else {
1811       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1812     }
1813 
1814     return false;
1815   }
1816 };
1817 
1818 class G1ParNoteEndTask: public AbstractGangTask {
1819   friend class G1NoteEndOfConcMarkClosure;
1820 
1821 protected:
1822   G1CollectedHeap* _g1h;
1823   FreeRegionList* _cleanup_list;
1824   HeapRegionClaimer _hrclaimer;
1825 
1826 public:
1827   G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1828       AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
1829   }
1830 
1831   void work(uint worker_id) {
1832     FreeRegionList local_cleanup_list("Local Cleanup List");
1833     HRRSCleanupTask hrrs_cleanup_task;
1834     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1835                                            &hrrs_cleanup_task);
1836     _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
1837     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1838 
1839     // Now update the lists
1840     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1841     {
1842       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1843       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1844 
1845       // If we iterate over the global cleanup list at the end of
1846       // cleanup to do this printing we will not guarantee to only
1847       // generate output for the newly-reclaimed regions (the list
1848       // might not be empty at the beginning of cleanup; we might
1849       // still be working on its previous contents). So we do the
1850       // printing here, before we append the new regions to the global
1851       // cleanup list.
1852 
1853       G1HRPrinter* hr_printer = _g1h->hr_printer();
1854       if (hr_printer->is_active()) {
1855         FreeRegionListIterator iter(&local_cleanup_list);
1856         while (iter.more_available()) {
1857           HeapRegion* hr = iter.get_next();
1858           hr_printer->cleanup(hr);
1859         }
1860       }
1861 
1862       _cleanup_list->add_ordered(&local_cleanup_list);
1863       assert(local_cleanup_list.is_empty(), "post-condition");
1864 
1865       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1866     }
1867   }
1868 };
1869 
1870 class G1ParScrubRemSetTask: public AbstractGangTask {
1871 protected:
1872   G1RemSet* _g1rs;
1873   BitMap* _region_bm;
1874   BitMap* _card_bm;
1875   HeapRegionClaimer _hrclaimer;
1876 
1877 public:
1878   G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
1879       AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) {
1880   }
1881 
1882   void work(uint worker_id) {
1883     _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer);
1884   }
1885 
1886 };
1887 
1888 void ConcurrentMark::cleanup() {
1889   // world is stopped at this checkpoint
1890   assert(SafepointSynchronize::is_at_safepoint(),
1891          "world should be stopped");
1892   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1893 
1894   // If a full collection has happened, we shouldn't do this.
1895   if (has_aborted()) {
1896     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1897     return;
1898   }
1899 
1900   g1h->verify_region_sets_optional();
1901 
1902   if (VerifyDuringGC) {
1903     HandleMark hm;  // handle scope
1904     g1h->prepare_for_verify();
1905     Universe::verify(VerifyOption_G1UsePrevMarking,
1906                      " VerifyDuringGC:(before)");
1907   }
1908   g1h->check_bitmaps("Cleanup Start");
1909 
1910   G1CollectorPolicy* g1p = g1h->g1_policy();
1911   g1p->record_concurrent_mark_cleanup_start();
1912 
1913   double start = os::elapsedTime();
1914 
1915   HeapRegionRemSet::reset_for_cleanup_tasks();
1916 
1917   // Do counting once more with the world stopped for good measure.
1918   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1919 
1920   g1h->workers()->run_task(&g1_par_count_task);
1921 
1922   if (VerifyDuringGC) {
1923     // Verify that the counting data accumulated during marking matches
1924     // that calculated by walking the marking bitmap.
1925 
1926     // Bitmaps to hold expected values
1927     BitMap expected_region_bm(_region_bm.size(), true);
1928     BitMap expected_card_bm(_card_bm.size(), true);
1929 
1930     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1931                                                  &_region_bm,
1932                                                  &_card_bm,
1933                                                  &expected_region_bm,
1934                                                  &expected_card_bm);
1935 
1936     g1h->workers()->run_task(&g1_par_verify_task);
1937 
1938     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
1939   }
1940 
1941   size_t start_used_bytes = g1h->used();
1942   g1h->collector_state()->set_mark_in_progress(false);
1943 
1944   double count_end = os::elapsedTime();
1945   double this_final_counting_time = (count_end - start);
1946   _total_counting_time += this_final_counting_time;
1947 
1948   if (G1PrintRegionLivenessInfo) {
1949     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
1950     _g1h->heap_region_iterate(&cl);
1951   }
1952 
1953   // Install newly created mark bitMap as "prev".
1954   swapMarkBitMaps();
1955 
1956   g1h->reset_gc_time_stamp();
1957 
1958   uint n_workers = _g1h->workers()->active_workers();
1959 
1960   // Note end of marking in all heap regions.
1961   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1962   g1h->workers()->run_task(&g1_par_note_end_task);
1963   g1h->check_gc_time_stamps();
1964 
1965   if (!cleanup_list_is_empty()) {
1966     // The cleanup list is not empty, so we'll have to process it
1967     // concurrently. Notify anyone else that might be wanting free
1968     // regions that there will be more free regions coming soon.
1969     g1h->set_free_regions_coming();
1970   }
1971 
1972   // call below, since it affects the metric by which we sort the heap
1973   // regions.
1974   if (G1ScrubRemSets) {
1975     double rs_scrub_start = os::elapsedTime();
1976     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
1977     g1h->workers()->run_task(&g1_par_scrub_rs_task);
1978 
1979     double rs_scrub_end = os::elapsedTime();
1980     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
1981     _total_rs_scrub_time += this_rs_scrub_time;
1982   }
1983 
1984   // this will also free any regions totally full of garbage objects,
1985   // and sort the regions.
1986   g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1987 
1988   // Statistics.
1989   double end = os::elapsedTime();
1990   _cleanup_times.add((end - start) * 1000.0);
1991 
1992   if (G1Log::fine()) {
1993     g1h->g1_policy()->print_heap_transition(start_used_bytes);
1994   }
1995 
1996   // Clean up will have freed any regions completely full of garbage.
1997   // Update the soft reference policy with the new heap occupancy.
1998   Universe::update_heap_info_at_gc();
1999 
2000   if (VerifyDuringGC) {
2001     HandleMark hm;  // handle scope
2002     g1h->prepare_for_verify();
2003     Universe::verify(VerifyOption_G1UsePrevMarking,
2004                      " VerifyDuringGC:(after)");
2005   }
2006 
2007   g1h->check_bitmaps("Cleanup End");
2008 
2009   g1h->verify_region_sets_optional();
2010 
2011   // We need to make this be a "collection" so any collection pause that
2012   // races with it goes around and waits for completeCleanup to finish.
2013   g1h->increment_total_collections();
2014 
2015   // Clean out dead classes and update Metaspace sizes.
2016   if (ClassUnloadingWithConcurrentMark) {
2017     ClassLoaderDataGraph::purge();
2018   }
2019   MetaspaceGC::compute_new_size();
2020 
2021   // We reclaimed old regions so we should calculate the sizes to make
2022   // sure we update the old gen/space data.
2023   g1h->g1mm()->update_sizes();
2024   g1h->allocation_context_stats().update_after_mark();
2025 
2026   g1h->trace_heap_after_concurrent_cycle();
2027 }
2028 
2029 void ConcurrentMark::completeCleanup() {
2030   if (has_aborted()) return;
2031 
2032   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2033 
2034   _cleanup_list.verify_optional();
2035   FreeRegionList tmp_free_list("Tmp Free List");
2036 
2037   if (G1ConcRegionFreeingVerbose) {
2038     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2039                            "cleanup list has %u entries",
2040                            _cleanup_list.length());
2041   }
2042 
2043   // No one else should be accessing the _cleanup_list at this point,
2044   // so it is not necessary to take any locks
2045   while (!_cleanup_list.is_empty()) {
2046     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2047     assert(hr != NULL, "Got NULL from a non-empty list");
2048     hr->par_clear();
2049     tmp_free_list.add_ordered(hr);
2050 
2051     // Instead of adding one region at a time to the secondary_free_list,
2052     // we accumulate them in the local list and move them a few at a
2053     // time. This also cuts down on the number of notify_all() calls
2054     // we do during this process. We'll also append the local list when
2055     // _cleanup_list is empty (which means we just removed the last
2056     // region from the _cleanup_list).
2057     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2058         _cleanup_list.is_empty()) {
2059       if (G1ConcRegionFreeingVerbose) {
2060         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2061                                "appending %u entries to the secondary_free_list, "
2062                                "cleanup list still has %u entries",
2063                                tmp_free_list.length(),
2064                                _cleanup_list.length());
2065       }
2066 
2067       {
2068         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2069         g1h->secondary_free_list_add(&tmp_free_list);
2070         SecondaryFreeList_lock->notify_all();
2071       }
2072 #ifndef PRODUCT
2073       if (G1StressConcRegionFreeing) {
2074         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2075           os::sleep(Thread::current(), (jlong) 1, false);
2076         }
2077       }
2078 #endif
2079     }
2080   }
2081   assert(tmp_free_list.is_empty(), "post-condition");
2082 }
2083 
2084 // Supporting Object and Oop closures for reference discovery
2085 // and processing in during marking
2086 
2087 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2088   HeapWord* addr = (HeapWord*)obj;
2089   return addr != NULL &&
2090          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2091 }
2092 
2093 // 'Keep Alive' oop closure used by both serial parallel reference processing.
2094 // Uses the CMTask associated with a worker thread (for serial reference
2095 // processing the CMTask for worker 0 is used) to preserve (mark) and
2096 // trace referent objects.
2097 //
2098 // Using the CMTask and embedded local queues avoids having the worker
2099 // threads operating on the global mark stack. This reduces the risk
2100 // of overflowing the stack - which we would rather avoid at this late
2101 // state. Also using the tasks' local queues removes the potential
2102 // of the workers interfering with each other that could occur if
2103 // operating on the global stack.
2104 
2105 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2106   ConcurrentMark* _cm;
2107   CMTask*         _task;
2108   int             _ref_counter_limit;
2109   int             _ref_counter;
2110   bool            _is_serial;
2111  public:
2112   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2113     _cm(cm), _task(task), _is_serial(is_serial),
2114     _ref_counter_limit(G1RefProcDrainInterval) {
2115     assert(_ref_counter_limit > 0, "sanity");
2116     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2117     _ref_counter = _ref_counter_limit;
2118   }
2119 
2120   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2121   virtual void do_oop(      oop* p) { do_oop_work(p); }
2122 
2123   template <class T> void do_oop_work(T* p) {
2124     if (!_cm->has_overflown()) {
2125       oop obj = oopDesc::load_decode_heap_oop(p);
2126       if (_cm->verbose_high()) {
2127         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2128                                "*" PTR_FORMAT " = " PTR_FORMAT,
2129                                _task->worker_id(), p2i(p), p2i((void*) obj));
2130       }
2131 
2132       _task->deal_with_reference(obj);
2133       _ref_counter--;
2134 
2135       if (_ref_counter == 0) {
2136         // We have dealt with _ref_counter_limit references, pushing them
2137         // and objects reachable from them on to the local stack (and
2138         // possibly the global stack). Call CMTask::do_marking_step() to
2139         // process these entries.
2140         //
2141         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2142         // there's nothing more to do (i.e. we're done with the entries that
2143         // were pushed as a result of the CMTask::deal_with_reference() calls
2144         // above) or we overflow.
2145         //
2146         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2147         // flag while there may still be some work to do. (See the comment at
2148         // the beginning of CMTask::do_marking_step() for those conditions -
2149         // one of which is reaching the specified time target.) It is only
2150         // when CMTask::do_marking_step() returns without setting the
2151         // has_aborted() flag that the marking step has completed.
2152         do {
2153           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2154           _task->do_marking_step(mark_step_duration_ms,
2155                                  false      /* do_termination */,
2156                                  _is_serial);
2157         } while (_task->has_aborted() && !_cm->has_overflown());
2158         _ref_counter = _ref_counter_limit;
2159       }
2160     } else {
2161       if (_cm->verbose_high()) {
2162          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2163       }
2164     }
2165   }
2166 };
2167 
2168 // 'Drain' oop closure used by both serial and parallel reference processing.
2169 // Uses the CMTask associated with a given worker thread (for serial
2170 // reference processing the CMtask for worker 0 is used). Calls the
2171 // do_marking_step routine, with an unbelievably large timeout value,
2172 // to drain the marking data structures of the remaining entries
2173 // added by the 'keep alive' oop closure above.
2174 
2175 class G1CMDrainMarkingStackClosure: public VoidClosure {
2176   ConcurrentMark* _cm;
2177   CMTask*         _task;
2178   bool            _is_serial;
2179  public:
2180   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2181     _cm(cm), _task(task), _is_serial(is_serial) {
2182     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2183   }
2184 
2185   void do_void() {
2186     do {
2187       if (_cm->verbose_high()) {
2188         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2189                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2190       }
2191 
2192       // We call CMTask::do_marking_step() to completely drain the local
2193       // and global marking stacks of entries pushed by the 'keep alive'
2194       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2195       //
2196       // CMTask::do_marking_step() is called in a loop, which we'll exit
2197       // if there's nothing more to do (i.e. we've completely drained the
2198       // entries that were pushed as a a result of applying the 'keep alive'
2199       // closure to the entries on the discovered ref lists) or we overflow
2200       // the global marking stack.
2201       //
2202       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2203       // flag while there may still be some work to do. (See the comment at
2204       // the beginning of CMTask::do_marking_step() for those conditions -
2205       // one of which is reaching the specified time target.) It is only
2206       // when CMTask::do_marking_step() returns without setting the
2207       // has_aborted() flag that the marking step has completed.
2208 
2209       _task->do_marking_step(1000000000.0 /* something very large */,
2210                              true         /* do_termination */,
2211                              _is_serial);
2212     } while (_task->has_aborted() && !_cm->has_overflown());
2213   }
2214 };
2215 
2216 // Implementation of AbstractRefProcTaskExecutor for parallel
2217 // reference processing at the end of G1 concurrent marking
2218 
2219 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2220 private:
2221   G1CollectedHeap* _g1h;
2222   ConcurrentMark*  _cm;
2223   WorkGang*        _workers;
2224   uint             _active_workers;
2225 
2226 public:
2227   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2228                           ConcurrentMark* cm,
2229                           WorkGang* workers,
2230                           uint n_workers) :
2231     _g1h(g1h), _cm(cm),
2232     _workers(workers), _active_workers(n_workers) { }
2233 
2234   // Executes the given task using concurrent marking worker threads.
2235   virtual void execute(ProcessTask& task);
2236   virtual void execute(EnqueueTask& task);
2237 };
2238 
2239 class G1CMRefProcTaskProxy: public AbstractGangTask {
2240   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2241   ProcessTask&     _proc_task;
2242   G1CollectedHeap* _g1h;
2243   ConcurrentMark*  _cm;
2244 
2245 public:
2246   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2247                      G1CollectedHeap* g1h,
2248                      ConcurrentMark* cm) :
2249     AbstractGangTask("Process reference objects in parallel"),
2250     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2251     ReferenceProcessor* rp = _g1h->ref_processor_cm();
2252     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2253   }
2254 
2255   virtual void work(uint worker_id) {
2256     ResourceMark rm;
2257     HandleMark hm;
2258     CMTask* task = _cm->task(worker_id);
2259     G1CMIsAliveClosure g1_is_alive(_g1h);
2260     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2261     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2262 
2263     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2264   }
2265 };
2266 
2267 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2268   assert(_workers != NULL, "Need parallel worker threads.");
2269   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2270 
2271   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2272 
2273   // We need to reset the concurrency level before each
2274   // proxy task execution, so that the termination protocol
2275   // and overflow handling in CMTask::do_marking_step() knows
2276   // how many workers to wait for.
2277   _cm->set_concurrency(_active_workers);
2278   _workers->run_task(&proc_task_proxy);
2279 }
2280 
2281 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2282   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2283   EnqueueTask& _enq_task;
2284 
2285 public:
2286   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2287     AbstractGangTask("Enqueue reference objects in parallel"),
2288     _enq_task(enq_task) { }
2289 
2290   virtual void work(uint worker_id) {
2291     _enq_task.work(worker_id);
2292   }
2293 };
2294 
2295 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2296   assert(_workers != NULL, "Need parallel worker threads.");
2297   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2298 
2299   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2300 
2301   // Not strictly necessary but...
2302   //
2303   // We need to reset the concurrency level before each
2304   // proxy task execution, so that the termination protocol
2305   // and overflow handling in CMTask::do_marking_step() knows
2306   // how many workers to wait for.
2307   _cm->set_concurrency(_active_workers);
2308   _workers->run_task(&enq_task_proxy);
2309 }
2310 
2311 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2312   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2313 }
2314 
2315 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2316   if (has_overflown()) {
2317     // Skip processing the discovered references if we have
2318     // overflown the global marking stack. Reference objects
2319     // only get discovered once so it is OK to not
2320     // de-populate the discovered reference lists. We could have,
2321     // but the only benefit would be that, when marking restarts,
2322     // less reference objects are discovered.
2323     return;
2324   }
2325 
2326   ResourceMark rm;
2327   HandleMark   hm;
2328 
2329   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2330 
2331   // Is alive closure.
2332   G1CMIsAliveClosure g1_is_alive(g1h);
2333 
2334   // Inner scope to exclude the cleaning of the string and symbol
2335   // tables from the displayed time.
2336   {
2337     G1CMTraceTime t("GC ref-proc", G1Log::finer());
2338 
2339     ReferenceProcessor* rp = g1h->ref_processor_cm();
2340 
2341     // See the comment in G1CollectedHeap::ref_processing_init()
2342     // about how reference processing currently works in G1.
2343 
2344     // Set the soft reference policy
2345     rp->setup_policy(clear_all_soft_refs);
2346     assert(_markStack.isEmpty(), "mark stack should be empty");
2347 
2348     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2349     // in serial reference processing. Note these closures are also
2350     // used for serially processing (by the the current thread) the
2351     // JNI references during parallel reference processing.
2352     //
2353     // These closures do not need to synchronize with the worker
2354     // threads involved in parallel reference processing as these
2355     // instances are executed serially by the current thread (e.g.
2356     // reference processing is not multi-threaded and is thus
2357     // performed by the current thread instead of a gang worker).
2358     //
2359     // The gang tasks involved in parallel reference processing create
2360     // their own instances of these closures, which do their own
2361     // synchronization among themselves.
2362     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2363     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2364 
2365     // We need at least one active thread. If reference processing
2366     // is not multi-threaded we use the current (VMThread) thread,
2367     // otherwise we use the work gang from the G1CollectedHeap and
2368     // we utilize all the worker threads we can.
2369     bool processing_is_mt = rp->processing_is_mt();
2370     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2371     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2372 
2373     // Parallel processing task executor.
2374     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2375                                               g1h->workers(), active_workers);
2376     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2377 
2378     // Set the concurrency level. The phase was already set prior to
2379     // executing the remark task.
2380     set_concurrency(active_workers);
2381 
2382     // Set the degree of MT processing here.  If the discovery was done MT,
2383     // the number of threads involved during discovery could differ from
2384     // the number of active workers.  This is OK as long as the discovered
2385     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2386     rp->set_active_mt_degree(active_workers);
2387 
2388     // Process the weak references.
2389     const ReferenceProcessorStats& stats =
2390         rp->process_discovered_references(&g1_is_alive,
2391                                           &g1_keep_alive,
2392                                           &g1_drain_mark_stack,
2393                                           executor,
2394                                           g1h->gc_timer_cm());
2395     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2396 
2397     // The do_oop work routines of the keep_alive and drain_marking_stack
2398     // oop closures will set the has_overflown flag if we overflow the
2399     // global marking stack.
2400 
2401     assert(_markStack.overflow() || _markStack.isEmpty(),
2402             "mark stack should be empty (unless it overflowed)");
2403 
2404     if (_markStack.overflow()) {
2405       // This should have been done already when we tried to push an
2406       // entry on to the global mark stack. But let's do it again.
2407       set_has_overflown();
2408     }
2409 
2410     assert(rp->num_q() == active_workers, "why not");
2411 
2412     rp->enqueue_discovered_references(executor);
2413 
2414     rp->verify_no_references_recorded();
2415     assert(!rp->discovery_enabled(), "Post condition");
2416   }
2417 
2418   if (has_overflown()) {
2419     // We can not trust g1_is_alive if the marking stack overflowed
2420     return;
2421   }
2422 
2423   assert(_markStack.isEmpty(), "Marking should have completed");
2424 
2425   // Unload Klasses, String, Symbols, Code Cache, etc.
2426   {
2427     G1CMTraceTime trace("Unloading", G1Log::finer());
2428 
2429     if (ClassUnloadingWithConcurrentMark) {
2430       bool purged_classes;
2431 
2432       {
2433         G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
2434         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2435       }
2436 
2437       {
2438         G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
2439         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2440       }
2441     }
2442 
2443     if (G1StringDedup::is_enabled()) {
2444       G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
2445       G1StringDedup::unlink(&g1_is_alive);
2446     }
2447   }
2448 }
2449 
2450 void ConcurrentMark::swapMarkBitMaps() {
2451   CMBitMapRO* temp = _prevMarkBitMap;
2452   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2453   _nextMarkBitMap  = (CMBitMap*)  temp;
2454 }
2455 
2456 // Closure for marking entries in SATB buffers.
2457 class CMSATBBufferClosure : public SATBBufferClosure {
2458 private:
2459   CMTask* _task;
2460   G1CollectedHeap* _g1h;
2461 
2462   // This is very similar to CMTask::deal_with_reference, but with
2463   // more relaxed requirements for the argument, so this must be more
2464   // circumspect about treating the argument as an object.
2465   void do_entry(void* entry) const {
2466     _task->increment_refs_reached();
2467     HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
2468     if (entry < hr->next_top_at_mark_start()) {
2469       // Until we get here, we don't know whether entry refers to a valid
2470       // object; it could instead have been a stale reference.
2471       oop obj = static_cast<oop>(entry);
2472       assert(obj->is_oop(true /* ignore mark word */),
2473              err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
2474       _task->make_reference_grey(obj, hr);
2475     }
2476   }
2477 
2478 public:
2479   CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
2480     : _task(task), _g1h(g1h) { }
2481 
2482   virtual void do_buffer(void** buffer, size_t size) {
2483     for (size_t i = 0; i < size; ++i) {
2484       do_entry(buffer[i]);
2485     }
2486   }
2487 };
2488 
2489 class G1RemarkThreadsClosure : public ThreadClosure {
2490   CMSATBBufferClosure _cm_satb_cl;
2491   G1CMOopClosure _cm_cl;
2492   MarkingCodeBlobClosure _code_cl;
2493   int _thread_parity;
2494 
2495  public:
2496   G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
2497     _cm_satb_cl(task, g1h),
2498     _cm_cl(g1h, g1h->concurrent_mark(), task),
2499     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2500     _thread_parity(Threads::thread_claim_parity()) {}
2501 
2502   void do_thread(Thread* thread) {
2503     if (thread->is_Java_thread()) {
2504       if (thread->claim_oops_do(true, _thread_parity)) {
2505         JavaThread* jt = (JavaThread*)thread;
2506 
2507         // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking
2508         // however the liveness of oops reachable from nmethods have very complex lifecycles:
2509         // * Alive if on the stack of an executing method
2510         // * Weakly reachable otherwise
2511         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
2512         // live by the SATB invariant but other oops recorded in nmethods may behave differently.
2513         jt->nmethods_do(&_code_cl);
2514 
2515         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
2516       }
2517     } else if (thread->is_VM_thread()) {
2518       if (thread->claim_oops_do(true, _thread_parity)) {
2519         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
2520       }
2521     }
2522   }
2523 };
2524 
2525 class CMRemarkTask: public AbstractGangTask {
2526 private:
2527   ConcurrentMark* _cm;
2528 public:
2529   void work(uint worker_id) {
2530     // Since all available tasks are actually started, we should
2531     // only proceed if we're supposed to be active.
2532     if (worker_id < _cm->active_tasks()) {
2533       CMTask* task = _cm->task(worker_id);
2534       task->record_start_time();
2535       {
2536         ResourceMark rm;
2537         HandleMark hm;
2538 
2539         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2540         Threads::threads_do(&threads_f);
2541       }
2542 
2543       do {
2544         task->do_marking_step(1000000000.0 /* something very large */,
2545                               true         /* do_termination       */,
2546                               false        /* is_serial            */);
2547       } while (task->has_aborted() && !_cm->has_overflown());
2548       // If we overflow, then we do not want to restart. We instead
2549       // want to abort remark and do concurrent marking again.
2550       task->record_end_time();
2551     }
2552   }
2553 
2554   CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
2555     AbstractGangTask("Par Remark"), _cm(cm) {
2556     _cm->terminator()->reset_for_reuse(active_workers);
2557   }
2558 };
2559 
2560 void ConcurrentMark::checkpointRootsFinalWork() {
2561   ResourceMark rm;
2562   HandleMark   hm;
2563   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2564 
2565   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2566 
2567   g1h->ensure_parsability(false);
2568 
2569   // this is remark, so we'll use up all active threads
2570   uint active_workers = g1h->workers()->active_workers();
2571   set_concurrency_and_phase(active_workers, false /* concurrent */);
2572   // Leave _parallel_marking_threads at it's
2573   // value originally calculated in the ConcurrentMark
2574   // constructor and pass values of the active workers
2575   // through the gang in the task.
2576 
2577   {
2578     StrongRootsScope srs(active_workers);
2579 
2580     CMRemarkTask remarkTask(this, active_workers);
2581     // We will start all available threads, even if we decide that the
2582     // active_workers will be fewer. The extra ones will just bail out
2583     // immediately.
2584     g1h->workers()->run_task(&remarkTask);
2585   }
2586 
2587   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2588   guarantee(has_overflown() ||
2589             satb_mq_set.completed_buffers_num() == 0,
2590             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2591                     BOOL_TO_STR(has_overflown()),
2592                     satb_mq_set.completed_buffers_num()));
2593 
2594   print_stats();
2595 }
2596 
2597 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2598   // Note we are overriding the read-only view of the prev map here, via
2599   // the cast.
2600   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2601 }
2602 
2603 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2604   _nextMarkBitMap->clearRange(mr);
2605 }
2606 
2607 HeapRegion*
2608 ConcurrentMark::claim_region(uint worker_id) {
2609   // "checkpoint" the finger
2610   HeapWord* finger = _finger;
2611 
2612   // _heap_end will not change underneath our feet; it only changes at
2613   // yield points.
2614   while (finger < _heap_end) {
2615     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2616 
2617     // Note on how this code handles humongous regions. In the
2618     // normal case the finger will reach the start of a "starts
2619     // humongous" (SH) region. Its end will either be the end of the
2620     // last "continues humongous" (CH) region in the sequence, or the
2621     // standard end of the SH region (if the SH is the only region in
2622     // the sequence). That way claim_region() will skip over the CH
2623     // regions. However, there is a subtle race between a CM thread
2624     // executing this method and a mutator thread doing a humongous
2625     // object allocation. The two are not mutually exclusive as the CM
2626     // thread does not need to hold the Heap_lock when it gets
2627     // here. So there is a chance that claim_region() will come across
2628     // a free region that's in the progress of becoming a SH or a CH
2629     // region. In the former case, it will either
2630     //   a) Miss the update to the region's end, in which case it will
2631     //      visit every subsequent CH region, will find their bitmaps
2632     //      empty, and do nothing, or
2633     //   b) Will observe the update of the region's end (in which case
2634     //      it will skip the subsequent CH regions).
2635     // If it comes across a region that suddenly becomes CH, the
2636     // scenario will be similar to b). So, the race between
2637     // claim_region() and a humongous object allocation might force us
2638     // to do a bit of unnecessary work (due to some unnecessary bitmap
2639     // iterations) but it should not introduce and correctness issues.
2640     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2641 
2642     // Above heap_region_containing_raw may return NULL as we always scan claim
2643     // until the end of the heap. In this case, just jump to the next region.
2644     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2645 
2646     // Is the gap between reading the finger and doing the CAS too long?
2647     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2648     if (res == finger && curr_region != NULL) {
2649       // we succeeded
2650       HeapWord*   bottom        = curr_region->bottom();
2651       HeapWord*   limit         = curr_region->next_top_at_mark_start();
2652 
2653       if (verbose_low()) {
2654         gclog_or_tty->print_cr("[%u] curr_region = " PTR_FORMAT " "
2655                                "[" PTR_FORMAT ", " PTR_FORMAT "), "
2656                                "limit = " PTR_FORMAT,
2657                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2658       }
2659 
2660       // notice that _finger == end cannot be guaranteed here since,
2661       // someone else might have moved the finger even further
2662       assert(_finger >= end, "the finger should have moved forward");
2663 
2664       if (verbose_low()) {
2665         gclog_or_tty->print_cr("[%u] we were successful with region = "
2666                                PTR_FORMAT, worker_id, p2i(curr_region));
2667       }
2668 
2669       if (limit > bottom) {
2670         if (verbose_low()) {
2671           gclog_or_tty->print_cr("[%u] region " PTR_FORMAT " is not empty, "
2672                                  "returning it ", worker_id, p2i(curr_region));
2673         }
2674         return curr_region;
2675       } else {
2676         assert(limit == bottom,
2677                "the region limit should be at bottom");
2678         if (verbose_low()) {
2679           gclog_or_tty->print_cr("[%u] region " PTR_FORMAT " is empty, "
2680                                  "returning NULL", worker_id, p2i(curr_region));
2681         }
2682         // we return NULL and the caller should try calling
2683         // claim_region() again.
2684         return NULL;
2685       }
2686     } else {
2687       assert(_finger > finger, "the finger should have moved forward");
2688       if (verbose_low()) {
2689         if (curr_region == NULL) {
2690           gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
2691                                  "global finger = " PTR_FORMAT ", "
2692                                  "our finger = " PTR_FORMAT,
2693                                  worker_id, p2i(_finger), p2i(finger));
2694         } else {
2695           gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2696                                  "global finger = " PTR_FORMAT ", "
2697                                  "our finger = " PTR_FORMAT,
2698                                  worker_id, p2i(_finger), p2i(finger));
2699         }
2700       }
2701 
2702       // read it again
2703       finger = _finger;
2704     }
2705   }
2706 
2707   return NULL;
2708 }
2709 
2710 #ifndef PRODUCT
2711 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
2712 private:
2713   G1CollectedHeap* _g1h;
2714   const char* _phase;
2715   int _info;
2716 
2717 public:
2718   VerifyNoCSetOops(const char* phase, int info = -1) :
2719     _g1h(G1CollectedHeap::heap()),
2720     _phase(phase),
2721     _info(info)
2722   { }
2723 
2724   void operator()(oop obj) const {
2725     guarantee(obj->is_oop(),
2726               err_msg("Non-oop " PTR_FORMAT ", phase: %s, info: %d",
2727                       p2i(obj), _phase, _info));
2728     guarantee(!_g1h->obj_in_cs(obj),
2729               err_msg("obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
2730                       p2i(obj), _phase, _info));
2731   }
2732 };
2733 
2734 void ConcurrentMark::verify_no_cset_oops() {
2735   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2736   if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
2737     return;
2738   }
2739 
2740   // Verify entries on the global mark stack
2741   _markStack.iterate(VerifyNoCSetOops("Stack"));
2742 
2743   // Verify entries on the task queues
2744   for (uint i = 0; i < _max_worker_id; ++i) {
2745     CMTaskQueue* queue = _task_queues->queue(i);
2746     queue->iterate(VerifyNoCSetOops("Queue", i));
2747   }
2748 
2749   // Verify the global finger
2750   HeapWord* global_finger = finger();
2751   if (global_finger != NULL && global_finger < _heap_end) {
2752     // The global finger always points to a heap region boundary. We
2753     // use heap_region_containing_raw() to get the containing region
2754     // given that the global finger could be pointing to a free region
2755     // which subsequently becomes continues humongous. If that
2756     // happens, heap_region_containing() will return the bottom of the
2757     // corresponding starts humongous region and the check below will
2758     // not hold any more.
2759     // Since we always iterate over all regions, we might get a NULL HeapRegion
2760     // here.
2761     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2762     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2763               err_msg("global finger: " PTR_FORMAT " region: " HR_FORMAT,
2764                       p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
2765   }
2766 
2767   // Verify the task fingers
2768   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2769   for (uint i = 0; i < parallel_marking_threads(); ++i) {
2770     CMTask* task = _tasks[i];
2771     HeapWord* task_finger = task->finger();
2772     if (task_finger != NULL && task_finger < _heap_end) {
2773       // See above note on the global finger verification.
2774       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2775       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2776                 !task_hr->in_collection_set(),
2777                 err_msg("task finger: " PTR_FORMAT " region: " HR_FORMAT,
2778                         p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
2779     }
2780   }
2781 }
2782 #endif // PRODUCT
2783 
2784 // Aggregate the counting data that was constructed concurrently
2785 // with marking.
2786 class AggregateCountDataHRClosure: public HeapRegionClosure {
2787   G1CollectedHeap* _g1h;
2788   ConcurrentMark* _cm;
2789   CardTableModRefBS* _ct_bs;
2790   BitMap* _cm_card_bm;
2791   uint _max_worker_id;
2792 
2793  public:
2794   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2795                               BitMap* cm_card_bm,
2796                               uint max_worker_id) :
2797     _g1h(g1h), _cm(g1h->concurrent_mark()),
2798     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
2799     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2800 
2801   bool doHeapRegion(HeapRegion* hr) {
2802     if (hr->is_continues_humongous()) {
2803       // We will ignore these here and process them when their
2804       // associated "starts humongous" region is processed.
2805       // Note that we cannot rely on their associated
2806       // "starts humongous" region to have their bit set to 1
2807       // since, due to the region chunking in the parallel region
2808       // iteration, a "continues humongous" region might be visited
2809       // before its associated "starts humongous".
2810       return false;
2811     }
2812 
2813     HeapWord* start = hr->bottom();
2814     HeapWord* limit = hr->next_top_at_mark_start();
2815     HeapWord* end = hr->end();
2816 
2817     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2818            err_msg("Preconditions not met - "
2819                    "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2820                    "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2821                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
2822 
2823     assert(hr->next_marked_bytes() == 0, "Precondition");
2824 
2825     if (start == limit) {
2826       // NTAMS of this region has not been set so nothing to do.
2827       return false;
2828     }
2829 
2830     // 'start' should be in the heap.
2831     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2832     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2833     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2834 
2835     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2836     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2837     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2838 
2839     // If ntams is not card aligned then we bump card bitmap index
2840     // for limit so that we get the all the cards spanned by
2841     // the object ending at ntams.
2842     // Note: if this is the last region in the heap then ntams
2843     // could be actually just beyond the end of the the heap;
2844     // limit_idx will then  correspond to a (non-existent) card
2845     // that is also outside the heap.
2846     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
2847       limit_idx += 1;
2848     }
2849 
2850     assert(limit_idx <= end_idx, "or else use atomics");
2851 
2852     // Aggregate the "stripe" in the count data associated with hr.
2853     uint hrm_index = hr->hrm_index();
2854     size_t marked_bytes = 0;
2855 
2856     for (uint i = 0; i < _max_worker_id; i += 1) {
2857       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
2858       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
2859 
2860       // Fetch the marked_bytes in this region for task i and
2861       // add it to the running total for this region.
2862       marked_bytes += marked_bytes_array[hrm_index];
2863 
2864       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
2865       // into the global card bitmap.
2866       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
2867 
2868       while (scan_idx < limit_idx) {
2869         assert(task_card_bm->at(scan_idx) == true, "should be");
2870         _cm_card_bm->set_bit(scan_idx);
2871         assert(_cm_card_bm->at(scan_idx) == true, "should be");
2872 
2873         // BitMap::get_next_one_offset() can handle the case when
2874         // its left_offset parameter is greater than its right_offset
2875         // parameter. It does, however, have an early exit if
2876         // left_offset == right_offset. So let's limit the value
2877         // passed in for left offset here.
2878         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
2879         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
2880       }
2881     }
2882 
2883     // Update the marked bytes for this region.
2884     hr->add_to_marked_bytes(marked_bytes);
2885 
2886     // Next heap region
2887     return false;
2888   }
2889 };
2890 
2891 class G1AggregateCountDataTask: public AbstractGangTask {
2892 protected:
2893   G1CollectedHeap* _g1h;
2894   ConcurrentMark* _cm;
2895   BitMap* _cm_card_bm;
2896   uint _max_worker_id;
2897   uint _active_workers;
2898   HeapRegionClaimer _hrclaimer;
2899 
2900 public:
2901   G1AggregateCountDataTask(G1CollectedHeap* g1h,
2902                            ConcurrentMark* cm,
2903                            BitMap* cm_card_bm,
2904                            uint max_worker_id,
2905                            uint n_workers) :
2906       AbstractGangTask("Count Aggregation"),
2907       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
2908       _max_worker_id(max_worker_id),
2909       _active_workers(n_workers),
2910       _hrclaimer(_active_workers) {
2911   }
2912 
2913   void work(uint worker_id) {
2914     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
2915 
2916     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
2917   }
2918 };
2919 
2920 
2921 void ConcurrentMark::aggregate_count_data() {
2922   uint n_workers = _g1h->workers()->active_workers();
2923 
2924   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
2925                                            _max_worker_id, n_workers);
2926 
2927   _g1h->workers()->run_task(&g1_par_agg_task);
2928 }
2929 
2930 // Clear the per-worker arrays used to store the per-region counting data
2931 void ConcurrentMark::clear_all_count_data() {
2932   // Clear the global card bitmap - it will be filled during
2933   // liveness count aggregation (during remark) and the
2934   // final counting task.
2935   _card_bm.clear();
2936 
2937   // Clear the global region bitmap - it will be filled as part
2938   // of the final counting task.
2939   _region_bm.clear();
2940 
2941   uint max_regions = _g1h->max_regions();
2942   assert(_max_worker_id > 0, "uninitialized");
2943 
2944   for (uint i = 0; i < _max_worker_id; i += 1) {
2945     BitMap* task_card_bm = count_card_bitmap_for(i);
2946     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
2947 
2948     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
2949     assert(marked_bytes_array != NULL, "uninitialized");
2950 
2951     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
2952     task_card_bm->clear();
2953   }
2954 }
2955 
2956 void ConcurrentMark::print_stats() {
2957   if (verbose_stats()) {
2958     gclog_or_tty->print_cr("---------------------------------------------------------------------");
2959     for (size_t i = 0; i < _active_tasks; ++i) {
2960       _tasks[i]->print_stats();
2961       gclog_or_tty->print_cr("---------------------------------------------------------------------");
2962     }
2963   }
2964 }
2965 
2966 // abandon current marking iteration due to a Full GC
2967 void ConcurrentMark::abort() {
2968   if (!cmThread()->during_cycle() || _has_aborted) {
2969     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2970     return;
2971   }
2972 
2973   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2974   // concurrent bitmap clearing.
2975   _nextMarkBitMap->clearAll();
2976 
2977   // Note we cannot clear the previous marking bitmap here
2978   // since VerifyDuringGC verifies the objects marked during
2979   // a full GC against the previous bitmap.
2980 
2981   // Clear the liveness counting data
2982   clear_all_count_data();
2983   // Empty mark stack
2984   reset_marking_state();
2985   for (uint i = 0; i < _max_worker_id; ++i) {
2986     _tasks[i]->clear_region_fields();
2987   }
2988   _first_overflow_barrier_sync.abort();
2989   _second_overflow_barrier_sync.abort();
2990   _has_aborted = true;
2991 
2992   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2993   satb_mq_set.abandon_partial_marking();
2994   // This can be called either during or outside marking, we'll read
2995   // the expected_active value from the SATB queue set.
2996   satb_mq_set.set_active_all_threads(
2997                                  false, /* new active value */
2998                                  satb_mq_set.is_active() /* expected_active */);
2999 
3000   _g1h->trace_heap_after_concurrent_cycle();
3001   _g1h->register_concurrent_cycle_end();
3002 }
3003 
3004 static void print_ms_time_info(const char* prefix, const char* name,
3005                                NumberSeq& ns) {
3006   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3007                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3008   if (ns.num() > 0) {
3009     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3010                            prefix, ns.sd(), ns.maximum());
3011   }
3012 }
3013 
3014 void ConcurrentMark::print_summary_info() {
3015   gclog_or_tty->print_cr(" Concurrent marking:");
3016   print_ms_time_info("  ", "init marks", _init_times);
3017   print_ms_time_info("  ", "remarks", _remark_times);
3018   {
3019     print_ms_time_info("     ", "final marks", _remark_mark_times);
3020     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3021 
3022   }
3023   print_ms_time_info("  ", "cleanups", _cleanup_times);
3024   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
3025                          _total_counting_time,
3026                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3027                           (double)_cleanup_times.num()
3028                          : 0.0));
3029   if (G1ScrubRemSets) {
3030     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3031                            _total_rs_scrub_time,
3032                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3033                             (double)_cleanup_times.num()
3034                            : 0.0));
3035   }
3036   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3037                          (_init_times.sum() + _remark_times.sum() +
3038                           _cleanup_times.sum())/1000.0);
3039   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3040                 "(%8.2f s marking).",
3041                 cmThread()->vtime_accum(),
3042                 cmThread()->vtime_mark_accum());
3043 }
3044 
3045 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3046   _parallel_workers->print_worker_threads_on(st);
3047 }
3048 
3049 void ConcurrentMark::print_on_error(outputStream* st) const {
3050   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3051       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3052   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3053   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3054 }
3055 
3056 // We take a break if someone is trying to stop the world.
3057 bool ConcurrentMark::do_yield_check(uint worker_id) {
3058   if (SuspendibleThreadSet::should_yield()) {
3059     if (worker_id == 0) {
3060       _g1h->g1_policy()->record_concurrent_pause();
3061     }
3062     SuspendibleThreadSet::yield();
3063     return true;
3064   } else {
3065     return false;
3066   }
3067 }
3068 
3069 #ifndef PRODUCT
3070 // for debugging purposes
3071 void ConcurrentMark::print_finger() {
3072   gclog_or_tty->print_cr("heap [" PTR_FORMAT ", " PTR_FORMAT "), global finger = " PTR_FORMAT,
3073                          p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3074   for (uint i = 0; i < _max_worker_id; ++i) {
3075     gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3076   }
3077   gclog_or_tty->cr();
3078 }
3079 #endif
3080 
3081 // Closure for iteration over bitmaps
3082 class CMBitMapClosure : public BitMapClosure {
3083 private:
3084   // the bitmap that is being iterated over
3085   CMBitMap*                   _nextMarkBitMap;
3086   ConcurrentMark*             _cm;
3087   CMTask*                     _task;
3088 
3089 public:
3090   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3091     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
3092 
3093   bool do_bit(size_t offset) {
3094     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3095     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3096     assert( addr < _cm->finger(), "invariant");
3097 
3098     statsOnly( _task->increase_objs_found_on_bitmap() );
3099     assert(addr >= _task->finger(), "invariant");
3100 
3101     // We move that task's local finger along.
3102     _task->move_finger_to(addr);
3103 
3104     _task->scan_object(oop(addr));
3105     // we only partially drain the local queue and global stack
3106     _task->drain_local_queue(true);
3107     _task->drain_global_stack(true);
3108 
3109     // if the has_aborted flag has been raised, we need to bail out of
3110     // the iteration
3111     return !_task->has_aborted();
3112   }
3113 };
3114 
3115 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3116                                ConcurrentMark* cm,
3117                                CMTask* task)
3118   : _g1h(g1h), _cm(cm), _task(task) {
3119   assert(_ref_processor == NULL, "should be initialized to NULL");
3120 
3121   if (G1UseConcMarkReferenceProcessing) {
3122     _ref_processor = g1h->ref_processor_cm();
3123     assert(_ref_processor != NULL, "should not be NULL");
3124   }
3125 }
3126 
3127 void CMTask::setup_for_region(HeapRegion* hr) {
3128   assert(hr != NULL,
3129         "claim_region() should have filtered out NULL regions");
3130   assert(!hr->is_continues_humongous(),
3131         "claim_region() should have filtered out continues humongous regions");
3132 
3133   if (_cm->verbose_low()) {
3134     gclog_or_tty->print_cr("[%u] setting up for region " PTR_FORMAT,
3135                            _worker_id, p2i(hr));
3136   }
3137 
3138   _curr_region  = hr;
3139   _finger       = hr->bottom();
3140   update_region_limit();
3141 }
3142 
3143 void CMTask::update_region_limit() {
3144   HeapRegion* hr            = _curr_region;
3145   HeapWord* bottom          = hr->bottom();
3146   HeapWord* limit           = hr->next_top_at_mark_start();
3147 
3148   if (limit == bottom) {
3149     if (_cm->verbose_low()) {
3150       gclog_or_tty->print_cr("[%u] found an empty region "
3151                              "[" PTR_FORMAT ", " PTR_FORMAT ")",
3152                              _worker_id, p2i(bottom), p2i(limit));
3153     }
3154     // The region was collected underneath our feet.
3155     // We set the finger to bottom to ensure that the bitmap
3156     // iteration that will follow this will not do anything.
3157     // (this is not a condition that holds when we set the region up,
3158     // as the region is not supposed to be empty in the first place)
3159     _finger = bottom;
3160   } else if (limit >= _region_limit) {
3161     assert(limit >= _finger, "peace of mind");
3162   } else {
3163     assert(limit < _region_limit, "only way to get here");
3164     // This can happen under some pretty unusual circumstances.  An
3165     // evacuation pause empties the region underneath our feet (NTAMS
3166     // at bottom). We then do some allocation in the region (NTAMS
3167     // stays at bottom), followed by the region being used as a GC
3168     // alloc region (NTAMS will move to top() and the objects
3169     // originally below it will be grayed). All objects now marked in
3170     // the region are explicitly grayed, if below the global finger,
3171     // and we do not need in fact to scan anything else. So, we simply
3172     // set _finger to be limit to ensure that the bitmap iteration
3173     // doesn't do anything.
3174     _finger = limit;
3175   }
3176 
3177   _region_limit = limit;
3178 }
3179 
3180 void CMTask::giveup_current_region() {
3181   assert(_curr_region != NULL, "invariant");
3182   if (_cm->verbose_low()) {
3183     gclog_or_tty->print_cr("[%u] giving up region " PTR_FORMAT,
3184                            _worker_id, p2i(_curr_region));
3185   }
3186   clear_region_fields();
3187 }
3188 
3189 void CMTask::clear_region_fields() {
3190   // Values for these three fields that indicate that we're not
3191   // holding on to a region.
3192   _curr_region   = NULL;
3193   _finger        = NULL;
3194   _region_limit  = NULL;
3195 }
3196 
3197 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3198   if (cm_oop_closure == NULL) {
3199     assert(_cm_oop_closure != NULL, "invariant");
3200   } else {
3201     assert(_cm_oop_closure == NULL, "invariant");
3202   }
3203   _cm_oop_closure = cm_oop_closure;
3204 }
3205 
3206 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3207   guarantee(nextMarkBitMap != NULL, "invariant");
3208 
3209   if (_cm->verbose_low()) {
3210     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3211   }
3212 
3213   _nextMarkBitMap                = nextMarkBitMap;
3214   clear_region_fields();
3215 
3216   _calls                         = 0;
3217   _elapsed_time_ms               = 0.0;
3218   _termination_time_ms           = 0.0;
3219   _termination_start_time_ms     = 0.0;
3220 
3221 #if _MARKING_STATS_
3222   _aborted                       = 0;
3223   _aborted_overflow              = 0;
3224   _aborted_cm_aborted            = 0;
3225   _aborted_yield                 = 0;
3226   _aborted_timed_out             = 0;
3227   _aborted_satb                  = 0;
3228   _aborted_termination           = 0;
3229   _steal_attempts                = 0;
3230   _steals                        = 0;
3231   _local_pushes                  = 0;
3232   _local_pops                    = 0;
3233   _local_max_size                = 0;
3234   _objs_scanned                  = 0;
3235   _global_pushes                 = 0;
3236   _global_pops                   = 0;
3237   _global_max_size               = 0;
3238   _global_transfers_to           = 0;
3239   _global_transfers_from         = 0;
3240   _regions_claimed               = 0;
3241   _objs_found_on_bitmap          = 0;
3242   _satb_buffers_processed        = 0;
3243 #endif // _MARKING_STATS_
3244 }
3245 
3246 bool CMTask::should_exit_termination() {
3247   regular_clock_call();
3248   // This is called when we are in the termination protocol. We should
3249   // quit if, for some reason, this task wants to abort or the global
3250   // stack is not empty (this means that we can get work from it).
3251   return !_cm->mark_stack_empty() || has_aborted();
3252 }
3253 
3254 void CMTask::reached_limit() {
3255   assert(_words_scanned >= _words_scanned_limit ||
3256          _refs_reached >= _refs_reached_limit ,
3257          "shouldn't have been called otherwise");
3258   regular_clock_call();
3259 }
3260 
3261 void CMTask::regular_clock_call() {
3262   if (has_aborted()) return;
3263 
3264   // First, we need to recalculate the words scanned and refs reached
3265   // limits for the next clock call.
3266   recalculate_limits();
3267 
3268   // During the regular clock call we do the following
3269 
3270   // (1) If an overflow has been flagged, then we abort.
3271   if (_cm->has_overflown()) {
3272     set_has_aborted();
3273     return;
3274   }
3275 
3276   // If we are not concurrent (i.e. we're doing remark) we don't need
3277   // to check anything else. The other steps are only needed during
3278   // the concurrent marking phase.
3279   if (!concurrent()) return;
3280 
3281   // (2) If marking has been aborted for Full GC, then we also abort.
3282   if (_cm->has_aborted()) {
3283     set_has_aborted();
3284     statsOnly( ++_aborted_cm_aborted );
3285     return;
3286   }
3287 
3288   double curr_time_ms = os::elapsedVTime() * 1000.0;
3289 
3290   // (3) If marking stats are enabled, then we update the step history.
3291 #if _MARKING_STATS_
3292   if (_words_scanned >= _words_scanned_limit) {
3293     ++_clock_due_to_scanning;
3294   }
3295   if (_refs_reached >= _refs_reached_limit) {
3296     ++_clock_due_to_marking;
3297   }
3298 
3299   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3300   _interval_start_time_ms = curr_time_ms;
3301   _all_clock_intervals_ms.add(last_interval_ms);
3302 
3303   if (_cm->verbose_medium()) {
3304       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3305                         "scanned = " SIZE_FORMAT "%s, refs reached = " SIZE_FORMAT "%s",
3306                         _worker_id, last_interval_ms,
3307                         _words_scanned,
3308                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3309                         _refs_reached,
3310                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3311   }
3312 #endif // _MARKING_STATS_
3313 
3314   // (4) We check whether we should yield. If we have to, then we abort.
3315   if (SuspendibleThreadSet::should_yield()) {
3316     // We should yield. To do this we abort the task. The caller is
3317     // responsible for yielding.
3318     set_has_aborted();
3319     statsOnly( ++_aborted_yield );
3320     return;
3321   }
3322 
3323   // (5) We check whether we've reached our time quota. If we have,
3324   // then we abort.
3325   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3326   if (elapsed_time_ms > _time_target_ms) {
3327     set_has_aborted();
3328     _has_timed_out = true;
3329     statsOnly( ++_aborted_timed_out );
3330     return;
3331   }
3332 
3333   // (6) Finally, we check whether there are enough completed STAB
3334   // buffers available for processing. If there are, we abort.
3335   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3336   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3337     if (_cm->verbose_low()) {
3338       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3339                              _worker_id);
3340     }
3341     // we do need to process SATB buffers, we'll abort and restart
3342     // the marking task to do so
3343     set_has_aborted();
3344     statsOnly( ++_aborted_satb );
3345     return;
3346   }
3347 }
3348 
3349 void CMTask::recalculate_limits() {
3350   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3351   _words_scanned_limit      = _real_words_scanned_limit;
3352 
3353   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
3354   _refs_reached_limit       = _real_refs_reached_limit;
3355 }
3356 
3357 void CMTask::decrease_limits() {
3358   // This is called when we believe that we're going to do an infrequent
3359   // operation which will increase the per byte scanned cost (i.e. move
3360   // entries to/from the global stack). It basically tries to decrease the
3361   // scanning limit so that the clock is called earlier.
3362 
3363   if (_cm->verbose_medium()) {
3364     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3365   }
3366 
3367   _words_scanned_limit = _real_words_scanned_limit -
3368     3 * words_scanned_period / 4;
3369   _refs_reached_limit  = _real_refs_reached_limit -
3370     3 * refs_reached_period / 4;
3371 }
3372 
3373 void CMTask::move_entries_to_global_stack() {
3374   // local array where we'll store the entries that will be popped
3375   // from the local queue
3376   oop buffer[global_stack_transfer_size];
3377 
3378   int n = 0;
3379   oop obj;
3380   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3381     buffer[n] = obj;
3382     ++n;
3383   }
3384 
3385   if (n > 0) {
3386     // we popped at least one entry from the local queue
3387 
3388     statsOnly( ++_global_transfers_to; _local_pops += n );
3389 
3390     if (!_cm->mark_stack_push(buffer, n)) {
3391       if (_cm->verbose_low()) {
3392         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3393                                _worker_id);
3394       }
3395       set_has_aborted();
3396     } else {
3397       // the transfer was successful
3398 
3399       if (_cm->verbose_medium()) {
3400         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3401                                _worker_id, n);
3402       }
3403       statsOnly( size_t tmp_size = _cm->mark_stack_size();
3404                  if (tmp_size > _global_max_size) {
3405                    _global_max_size = tmp_size;
3406                  }
3407                  _global_pushes += n );
3408     }
3409   }
3410 
3411   // this operation was quite expensive, so decrease the limits
3412   decrease_limits();
3413 }
3414 
3415 void CMTask::get_entries_from_global_stack() {
3416   // local array where we'll store the entries that will be popped
3417   // from the global stack.
3418   oop buffer[global_stack_transfer_size];
3419   int n;
3420   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3421   assert(n <= global_stack_transfer_size,
3422          "we should not pop more than the given limit");
3423   if (n > 0) {
3424     // yes, we did actually pop at least one entry
3425 
3426     statsOnly( ++_global_transfers_from; _global_pops += n );
3427     if (_cm->verbose_medium()) {
3428       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3429                              _worker_id, n);
3430     }
3431     for (int i = 0; i < n; ++i) {
3432       bool success = _task_queue->push(buffer[i]);
3433       // We only call this when the local queue is empty or under a
3434       // given target limit. So, we do not expect this push to fail.
3435       assert(success, "invariant");
3436     }
3437 
3438     statsOnly( size_t tmp_size = (size_t)_task_queue->size();
3439                if (tmp_size > _local_max_size) {
3440                  _local_max_size = tmp_size;
3441                }
3442                _local_pushes += n );
3443   }
3444 
3445   // this operation was quite expensive, so decrease the limits
3446   decrease_limits();
3447 }
3448 
3449 void CMTask::drain_local_queue(bool partially) {
3450   if (has_aborted()) return;
3451 
3452   // Decide what the target size is, depending whether we're going to
3453   // drain it partially (so that other tasks can steal if they run out
3454   // of things to do) or totally (at the very end).
3455   size_t target_size;
3456   if (partially) {
3457     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3458   } else {
3459     target_size = 0;
3460   }
3461 
3462   if (_task_queue->size() > target_size) {
3463     if (_cm->verbose_high()) {
3464       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3465                              _worker_id, target_size);
3466     }
3467 
3468     oop obj;
3469     bool ret = _task_queue->pop_local(obj);
3470     while (ret) {
3471       statsOnly( ++_local_pops );
3472 
3473       if (_cm->verbose_high()) {
3474         gclog_or_tty->print_cr("[%u] popped " PTR_FORMAT, _worker_id,
3475                                p2i((void*) obj));
3476       }
3477 
3478       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3479       assert(!_g1h->is_on_master_free_list(
3480                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3481 
3482       scan_object(obj);
3483 
3484       if (_task_queue->size() <= target_size || has_aborted()) {
3485         ret = false;
3486       } else {
3487         ret = _task_queue->pop_local(obj);
3488       }
3489     }
3490 
3491     if (_cm->verbose_high()) {
3492       gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3493                              _worker_id, _task_queue->size());
3494     }
3495   }
3496 }
3497 
3498 void CMTask::drain_global_stack(bool partially) {
3499   if (has_aborted()) return;
3500 
3501   // We have a policy to drain the local queue before we attempt to
3502   // drain the global stack.
3503   assert(partially || _task_queue->size() == 0, "invariant");
3504 
3505   // Decide what the target size is, depending whether we're going to
3506   // drain it partially (so that other tasks can steal if they run out
3507   // of things to do) or totally (at the very end).  Notice that,
3508   // because we move entries from the global stack in chunks or
3509   // because another task might be doing the same, we might in fact
3510   // drop below the target. But, this is not a problem.
3511   size_t target_size;
3512   if (partially) {
3513     target_size = _cm->partial_mark_stack_size_target();
3514   } else {
3515     target_size = 0;
3516   }
3517 
3518   if (_cm->mark_stack_size() > target_size) {
3519     if (_cm->verbose_low()) {
3520       gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3521                              _worker_id, target_size);
3522     }
3523 
3524     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3525       get_entries_from_global_stack();
3526       drain_local_queue(partially);
3527     }
3528 
3529     if (_cm->verbose_low()) {
3530       gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3531                              _worker_id, _cm->mark_stack_size());
3532     }
3533   }
3534 }
3535 
3536 // SATB Queue has several assumptions on whether to call the par or
3537 // non-par versions of the methods. this is why some of the code is
3538 // replicated. We should really get rid of the single-threaded version
3539 // of the code to simplify things.
3540 void CMTask::drain_satb_buffers() {
3541   if (has_aborted()) return;
3542 
3543   // We set this so that the regular clock knows that we're in the
3544   // middle of draining buffers and doesn't set the abort flag when it
3545   // notices that SATB buffers are available for draining. It'd be
3546   // very counter productive if it did that. :-)
3547   _draining_satb_buffers = true;
3548 
3549   CMSATBBufferClosure satb_cl(this, _g1h);
3550   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3551 
3552   // This keeps claiming and applying the closure to completed buffers
3553   // until we run out of buffers or we need to abort.
3554   while (!has_aborted() &&
3555          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
3556     if (_cm->verbose_medium()) {
3557       gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3558     }
3559     statsOnly( ++_satb_buffers_processed );
3560     regular_clock_call();
3561   }
3562 
3563   _draining_satb_buffers = false;
3564 
3565   assert(has_aborted() ||
3566          concurrent() ||
3567          satb_mq_set.completed_buffers_num() == 0, "invariant");
3568 
3569   // again, this was a potentially expensive operation, decrease the
3570   // limits to get the regular clock call early
3571   decrease_limits();
3572 }
3573 
3574 void CMTask::print_stats() {
3575   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3576                          _worker_id, _calls);
3577   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3578                          _elapsed_time_ms, _termination_time_ms);
3579   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3580                          _step_times_ms.num(), _step_times_ms.avg(),
3581                          _step_times_ms.sd());
3582   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3583                          _step_times_ms.maximum(), _step_times_ms.sum());
3584 
3585 #if _MARKING_STATS_
3586   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3587                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3588                          _all_clock_intervals_ms.sd());
3589   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
3590                          _all_clock_intervals_ms.maximum(),
3591                          _all_clock_intervals_ms.sum());
3592   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT,
3593                          _clock_due_to_scanning, _clock_due_to_marking);
3594   gclog_or_tty->print_cr("  Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT,
3595                          _objs_scanned, _objs_found_on_bitmap);
3596   gclog_or_tty->print_cr("  Local Queue:  pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
3597                          _local_pushes, _local_pops, _local_max_size);
3598   gclog_or_tty->print_cr("  Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
3599                          _global_pushes, _global_pops, _global_max_size);
3600   gclog_or_tty->print_cr("                transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT,
3601                          _global_transfers_to,_global_transfers_from);
3602   gclog_or_tty->print_cr("  Regions: claimed = " SIZE_FORMAT, _regions_claimed);
3603   gclog_or_tty->print_cr("  SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed);
3604   gclog_or_tty->print_cr("  Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT,
3605                          _steal_attempts, _steals);
3606   gclog_or_tty->print_cr("  Aborted: " SIZE_FORMAT ", due to", _aborted);
3607   gclog_or_tty->print_cr("    overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT,
3608                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3609   gclog_or_tty->print_cr("    time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT,
3610                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3611 #endif // _MARKING_STATS_
3612 }
3613 
3614 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
3615   return _task_queues->steal(worker_id, hash_seed, obj);
3616 }
3617 
3618 /*****************************************************************************
3619 
3620     The do_marking_step(time_target_ms, ...) method is the building
3621     block of the parallel marking framework. It can be called in parallel
3622     with other invocations of do_marking_step() on different tasks
3623     (but only one per task, obviously) and concurrently with the
3624     mutator threads, or during remark, hence it eliminates the need
3625     for two versions of the code. When called during remark, it will
3626     pick up from where the task left off during the concurrent marking
3627     phase. Interestingly, tasks are also claimable during evacuation
3628     pauses too, since do_marking_step() ensures that it aborts before
3629     it needs to yield.
3630 
3631     The data structures that it uses to do marking work are the
3632     following:
3633 
3634       (1) Marking Bitmap. If there are gray objects that appear only
3635       on the bitmap (this happens either when dealing with an overflow
3636       or when the initial marking phase has simply marked the roots
3637       and didn't push them on the stack), then tasks claim heap
3638       regions whose bitmap they then scan to find gray objects. A
3639       global finger indicates where the end of the last claimed region
3640       is. A local finger indicates how far into the region a task has
3641       scanned. The two fingers are used to determine how to gray an
3642       object (i.e. whether simply marking it is OK, as it will be
3643       visited by a task in the future, or whether it needs to be also
3644       pushed on a stack).
3645 
3646       (2) Local Queue. The local queue of the task which is accessed
3647       reasonably efficiently by the task. Other tasks can steal from
3648       it when they run out of work. Throughout the marking phase, a
3649       task attempts to keep its local queue short but not totally
3650       empty, so that entries are available for stealing by other
3651       tasks. Only when there is no more work, a task will totally
3652       drain its local queue.
3653 
3654       (3) Global Mark Stack. This handles local queue overflow. During
3655       marking only sets of entries are moved between it and the local
3656       queues, as access to it requires a mutex and more fine-grain
3657       interaction with it which might cause contention. If it
3658       overflows, then the marking phase should restart and iterate
3659       over the bitmap to identify gray objects. Throughout the marking
3660       phase, tasks attempt to keep the global mark stack at a small
3661       length but not totally empty, so that entries are available for
3662       popping by other tasks. Only when there is no more work, tasks
3663       will totally drain the global mark stack.
3664 
3665       (4) SATB Buffer Queue. This is where completed SATB buffers are
3666       made available. Buffers are regularly removed from this queue
3667       and scanned for roots, so that the queue doesn't get too
3668       long. During remark, all completed buffers are processed, as
3669       well as the filled in parts of any uncompleted buffers.
3670 
3671     The do_marking_step() method tries to abort when the time target
3672     has been reached. There are a few other cases when the
3673     do_marking_step() method also aborts:
3674 
3675       (1) When the marking phase has been aborted (after a Full GC).
3676 
3677       (2) When a global overflow (on the global stack) has been
3678       triggered. Before the task aborts, it will actually sync up with
3679       the other tasks to ensure that all the marking data structures
3680       (local queues, stacks, fingers etc.)  are re-initialized so that
3681       when do_marking_step() completes, the marking phase can
3682       immediately restart.
3683 
3684       (3) When enough completed SATB buffers are available. The
3685       do_marking_step() method only tries to drain SATB buffers right
3686       at the beginning. So, if enough buffers are available, the
3687       marking step aborts and the SATB buffers are processed at
3688       the beginning of the next invocation.
3689 
3690       (4) To yield. when we have to yield then we abort and yield
3691       right at the end of do_marking_step(). This saves us from a lot
3692       of hassle as, by yielding we might allow a Full GC. If this
3693       happens then objects will be compacted underneath our feet, the
3694       heap might shrink, etc. We save checking for this by just
3695       aborting and doing the yield right at the end.
3696 
3697     From the above it follows that the do_marking_step() method should
3698     be called in a loop (or, otherwise, regularly) until it completes.
3699 
3700     If a marking step completes without its has_aborted() flag being
3701     true, it means it has completed the current marking phase (and
3702     also all other marking tasks have done so and have all synced up).
3703 
3704     A method called regular_clock_call() is invoked "regularly" (in
3705     sub ms intervals) throughout marking. It is this clock method that
3706     checks all the abort conditions which were mentioned above and
3707     decides when the task should abort. A work-based scheme is used to
3708     trigger this clock method: when the number of object words the
3709     marking phase has scanned or the number of references the marking
3710     phase has visited reach a given limit. Additional invocations to
3711     the method clock have been planted in a few other strategic places
3712     too. The initial reason for the clock method was to avoid calling
3713     vtime too regularly, as it is quite expensive. So, once it was in
3714     place, it was natural to piggy-back all the other conditions on it
3715     too and not constantly check them throughout the code.
3716 
3717     If do_termination is true then do_marking_step will enter its
3718     termination protocol.
3719 
3720     The value of is_serial must be true when do_marking_step is being
3721     called serially (i.e. by the VMThread) and do_marking_step should
3722     skip any synchronization in the termination and overflow code.
3723     Examples include the serial remark code and the serial reference
3724     processing closures.
3725 
3726     The value of is_serial must be false when do_marking_step is
3727     being called by any of the worker threads in a work gang.
3728     Examples include the concurrent marking code (CMMarkingTask),
3729     the MT remark code, and the MT reference processing closures.
3730 
3731  *****************************************************************************/
3732 
3733 void CMTask::do_marking_step(double time_target_ms,
3734                              bool do_termination,
3735                              bool is_serial) {
3736   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
3737   assert(concurrent() == _cm->concurrent(), "they should be the same");
3738 
3739   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
3740   assert(_task_queues != NULL, "invariant");
3741   assert(_task_queue != NULL, "invariant");
3742   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
3743 
3744   assert(!_claimed,
3745          "only one thread should claim this task at any one time");
3746 
3747   // OK, this doesn't safeguard again all possible scenarios, as it is
3748   // possible for two threads to set the _claimed flag at the same
3749   // time. But it is only for debugging purposes anyway and it will
3750   // catch most problems.
3751   _claimed = true;
3752 
3753   _start_time_ms = os::elapsedVTime() * 1000.0;
3754   statsOnly( _interval_start_time_ms = _start_time_ms );
3755 
3756   // If do_stealing is true then do_marking_step will attempt to
3757   // steal work from the other CMTasks. It only makes sense to
3758   // enable stealing when the termination protocol is enabled
3759   // and do_marking_step() is not being called serially.
3760   bool do_stealing = do_termination && !is_serial;
3761 
3762   double diff_prediction_ms =
3763     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
3764   _time_target_ms = time_target_ms - diff_prediction_ms;
3765 
3766   // set up the variables that are used in the work-based scheme to
3767   // call the regular clock method
3768   _words_scanned = 0;
3769   _refs_reached  = 0;
3770   recalculate_limits();
3771 
3772   // clear all flags
3773   clear_has_aborted();
3774   _has_timed_out = false;
3775   _draining_satb_buffers = false;
3776 
3777   ++_calls;
3778 
3779   if (_cm->verbose_low()) {
3780     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
3781                            "target = %1.2lfms >>>>>>>>>>",
3782                            _worker_id, _calls, _time_target_ms);
3783   }
3784 
3785   // Set up the bitmap and oop closures. Anything that uses them is
3786   // eventually called from this method, so it is OK to allocate these
3787   // statically.
3788   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
3789   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
3790   set_cm_oop_closure(&cm_oop_closure);
3791 
3792   if (_cm->has_overflown()) {
3793     // This can happen if the mark stack overflows during a GC pause
3794     // and this task, after a yield point, restarts. We have to abort
3795     // as we need to get into the overflow protocol which happens
3796     // right at the end of this task.
3797     set_has_aborted();
3798   }
3799 
3800   // First drain any available SATB buffers. After this, we will not
3801   // look at SATB buffers before the next invocation of this method.
3802   // If enough completed SATB buffers are queued up, the regular clock
3803   // will abort this task so that it restarts.
3804   drain_satb_buffers();
3805   // ...then partially drain the local queue and the global stack
3806   drain_local_queue(true);
3807   drain_global_stack(true);
3808 
3809   do {
3810     if (!has_aborted() && _curr_region != NULL) {
3811       // This means that we're already holding on to a region.
3812       assert(_finger != NULL, "if region is not NULL, then the finger "
3813              "should not be NULL either");
3814 
3815       // We might have restarted this task after an evacuation pause
3816       // which might have evacuated the region we're holding on to
3817       // underneath our feet. Let's read its limit again to make sure
3818       // that we do not iterate over a region of the heap that
3819       // contains garbage (update_region_limit() will also move
3820       // _finger to the start of the region if it is found empty).
3821       update_region_limit();
3822       // We will start from _finger not from the start of the region,
3823       // as we might be restarting this task after aborting half-way
3824       // through scanning this region. In this case, _finger points to
3825       // the address where we last found a marked object. If this is a
3826       // fresh region, _finger points to start().
3827       MemRegion mr = MemRegion(_finger, _region_limit);
3828 
3829       if (_cm->verbose_low()) {
3830         gclog_or_tty->print_cr("[%u] we're scanning part "
3831                                "[" PTR_FORMAT ", " PTR_FORMAT ") "
3832                                "of region " HR_FORMAT,
3833                                _worker_id, p2i(_finger), p2i(_region_limit),
3834                                HR_FORMAT_PARAMS(_curr_region));
3835       }
3836 
3837       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
3838              "humongous regions should go around loop once only");
3839 
3840       // Some special cases:
3841       // If the memory region is empty, we can just give up the region.
3842       // If the current region is humongous then we only need to check
3843       // the bitmap for the bit associated with the start of the object,
3844       // scan the object if it's live, and give up the region.
3845       // Otherwise, let's iterate over the bitmap of the part of the region
3846       // that is left.
3847       // If the iteration is successful, give up the region.
3848       if (mr.is_empty()) {
3849         giveup_current_region();
3850         regular_clock_call();
3851       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
3852         if (_nextMarkBitMap->isMarked(mr.start())) {
3853           // The object is marked - apply the closure
3854           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
3855           bitmap_closure.do_bit(offset);
3856         }
3857         // Even if this task aborted while scanning the humongous object
3858         // we can (and should) give up the current region.
3859         giveup_current_region();
3860         regular_clock_call();
3861       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
3862         giveup_current_region();
3863         regular_clock_call();
3864       } else {
3865         assert(has_aborted(), "currently the only way to do so");
3866         // The only way to abort the bitmap iteration is to return
3867         // false from the do_bit() method. However, inside the
3868         // do_bit() method we move the _finger to point to the
3869         // object currently being looked at. So, if we bail out, we
3870         // have definitely set _finger to something non-null.
3871         assert(_finger != NULL, "invariant");
3872 
3873         // Region iteration was actually aborted. So now _finger
3874         // points to the address of the object we last scanned. If we
3875         // leave it there, when we restart this task, we will rescan
3876         // the object. It is easy to avoid this. We move the finger by
3877         // enough to point to the next possible object header (the
3878         // bitmap knows by how much we need to move it as it knows its
3879         // granularity).
3880         assert(_finger < _region_limit, "invariant");
3881         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
3882         // Check if bitmap iteration was aborted while scanning the last object
3883         if (new_finger >= _region_limit) {
3884           giveup_current_region();
3885         } else {
3886           move_finger_to(new_finger);
3887         }
3888       }
3889     }
3890     // At this point we have either completed iterating over the
3891     // region we were holding on to, or we have aborted.
3892 
3893     // We then partially drain the local queue and the global stack.
3894     // (Do we really need this?)
3895     drain_local_queue(true);
3896     drain_global_stack(true);
3897 
3898     // Read the note on the claim_region() method on why it might
3899     // return NULL with potentially more regions available for
3900     // claiming and why we have to check out_of_regions() to determine
3901     // whether we're done or not.
3902     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
3903       // We are going to try to claim a new region. We should have
3904       // given up on the previous one.
3905       // Separated the asserts so that we know which one fires.
3906       assert(_curr_region  == NULL, "invariant");
3907       assert(_finger       == NULL, "invariant");
3908       assert(_region_limit == NULL, "invariant");
3909       if (_cm->verbose_low()) {
3910         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
3911       }
3912       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
3913       if (claimed_region != NULL) {
3914         // Yes, we managed to claim one
3915         statsOnly( ++_regions_claimed );
3916 
3917         if (_cm->verbose_low()) {
3918           gclog_or_tty->print_cr("[%u] we successfully claimed "
3919                                  "region " PTR_FORMAT,
3920                                  _worker_id, p2i(claimed_region));
3921         }
3922 
3923         setup_for_region(claimed_region);
3924         assert(_curr_region == claimed_region, "invariant");
3925       }
3926       // It is important to call the regular clock here. It might take
3927       // a while to claim a region if, for example, we hit a large
3928       // block of empty regions. So we need to call the regular clock
3929       // method once round the loop to make sure it's called
3930       // frequently enough.
3931       regular_clock_call();
3932     }
3933 
3934     if (!has_aborted() && _curr_region == NULL) {
3935       assert(_cm->out_of_regions(),
3936              "at this point we should be out of regions");
3937     }
3938   } while ( _curr_region != NULL && !has_aborted());
3939 
3940   if (!has_aborted()) {
3941     // We cannot check whether the global stack is empty, since other
3942     // tasks might be pushing objects to it concurrently.
3943     assert(_cm->out_of_regions(),
3944            "at this point we should be out of regions");
3945 
3946     if (_cm->verbose_low()) {
3947       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
3948     }
3949 
3950     // Try to reduce the number of available SATB buffers so that
3951     // remark has less work to do.
3952     drain_satb_buffers();
3953   }
3954 
3955   // Since we've done everything else, we can now totally drain the
3956   // local queue and global stack.
3957   drain_local_queue(false);
3958   drain_global_stack(false);
3959 
3960   // Attempt at work stealing from other task's queues.
3961   if (do_stealing && !has_aborted()) {
3962     // We have not aborted. This means that we have finished all that
3963     // we could. Let's try to do some stealing...
3964 
3965     // We cannot check whether the global stack is empty, since other
3966     // tasks might be pushing objects to it concurrently.
3967     assert(_cm->out_of_regions() && _task_queue->size() == 0,
3968            "only way to reach here");
3969 
3970     if (_cm->verbose_low()) {
3971       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
3972     }
3973 
3974     while (!has_aborted()) {
3975       oop obj;
3976       statsOnly( ++_steal_attempts );
3977 
3978       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
3979         if (_cm->verbose_medium()) {
3980           gclog_or_tty->print_cr("[%u] stolen " PTR_FORMAT " successfully",
3981                                  _worker_id, p2i((void*) obj));
3982         }
3983 
3984         statsOnly( ++_steals );
3985 
3986         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
3987                "any stolen object should be marked");
3988         scan_object(obj);
3989 
3990         // And since we're towards the end, let's totally drain the
3991         // local queue and global stack.
3992         drain_local_queue(false);
3993         drain_global_stack(false);
3994       } else {
3995         break;
3996       }
3997     }
3998   }
3999 
4000   // If we are about to wrap up and go into termination, check if we
4001   // should raise the overflow flag.
4002   if (do_termination && !has_aborted()) {
4003     if (_cm->force_overflow()->should_force()) {
4004       _cm->set_has_overflown();
4005       regular_clock_call();
4006     }
4007   }
4008 
4009   // We still haven't aborted. Now, let's try to get into the
4010   // termination protocol.
4011   if (do_termination && !has_aborted()) {
4012     // We cannot check whether the global stack is empty, since other
4013     // tasks might be concurrently pushing objects on it.
4014     // Separated the asserts so that we know which one fires.
4015     assert(_cm->out_of_regions(), "only way to reach here");
4016     assert(_task_queue->size() == 0, "only way to reach here");
4017 
4018     if (_cm->verbose_low()) {
4019       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4020     }
4021 
4022     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4023 
4024     // The CMTask class also extends the TerminatorTerminator class,
4025     // hence its should_exit_termination() method will also decide
4026     // whether to exit the termination protocol or not.
4027     bool finished = (is_serial ||
4028                      _cm->terminator()->offer_termination(this));
4029     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4030     _termination_time_ms +=
4031       termination_end_time_ms - _termination_start_time_ms;
4032 
4033     if (finished) {
4034       // We're all done.
4035 
4036       if (_worker_id == 0) {
4037         // let's allow task 0 to do this
4038         if (concurrent()) {
4039           assert(_cm->concurrent_marking_in_progress(), "invariant");
4040           // we need to set this to false before the next
4041           // safepoint. This way we ensure that the marking phase
4042           // doesn't observe any more heap expansions.
4043           _cm->clear_concurrent_marking_in_progress();
4044         }
4045       }
4046 
4047       // We can now guarantee that the global stack is empty, since
4048       // all other tasks have finished. We separated the guarantees so
4049       // that, if a condition is false, we can immediately find out
4050       // which one.
4051       guarantee(_cm->out_of_regions(), "only way to reach here");
4052       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4053       guarantee(_task_queue->size() == 0, "only way to reach here");
4054       guarantee(!_cm->has_overflown(), "only way to reach here");
4055       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4056 
4057       if (_cm->verbose_low()) {
4058         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4059       }
4060     } else {
4061       // Apparently there's more work to do. Let's abort this task. It
4062       // will restart it and we can hopefully find more things to do.
4063 
4064       if (_cm->verbose_low()) {
4065         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4066                                _worker_id);
4067       }
4068 
4069       set_has_aborted();
4070       statsOnly( ++_aborted_termination );
4071     }
4072   }
4073 
4074   // Mainly for debugging purposes to make sure that a pointer to the
4075   // closure which was statically allocated in this frame doesn't
4076   // escape it by accident.
4077   set_cm_oop_closure(NULL);
4078   double end_time_ms = os::elapsedVTime() * 1000.0;
4079   double elapsed_time_ms = end_time_ms - _start_time_ms;
4080   // Update the step history.
4081   _step_times_ms.add(elapsed_time_ms);
4082 
4083   if (has_aborted()) {
4084     // The task was aborted for some reason.
4085 
4086     statsOnly( ++_aborted );
4087 
4088     if (_has_timed_out) {
4089       double diff_ms = elapsed_time_ms - _time_target_ms;
4090       // Keep statistics of how well we did with respect to hitting
4091       // our target only if we actually timed out (if we aborted for
4092       // other reasons, then the results might get skewed).
4093       _marking_step_diffs_ms.add(diff_ms);
4094     }
4095 
4096     if (_cm->has_overflown()) {
4097       // This is the interesting one. We aborted because a global
4098       // overflow was raised. This means we have to restart the
4099       // marking phase and start iterating over regions. However, in
4100       // order to do this we have to make sure that all tasks stop
4101       // what they are doing and re-initialize in a safe manner. We
4102       // will achieve this with the use of two barrier sync points.
4103 
4104       if (_cm->verbose_low()) {
4105         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4106       }
4107 
4108       if (!is_serial) {
4109         // We only need to enter the sync barrier if being called
4110         // from a parallel context
4111         _cm->enter_first_sync_barrier(_worker_id);
4112 
4113         // When we exit this sync barrier we know that all tasks have
4114         // stopped doing marking work. So, it's now safe to
4115         // re-initialize our data structures. At the end of this method,
4116         // task 0 will clear the global data structures.
4117       }
4118 
4119       statsOnly( ++_aborted_overflow );
4120 
4121       // We clear the local state of this task...
4122       clear_region_fields();
4123 
4124       if (!is_serial) {
4125         // ...and enter the second barrier.
4126         _cm->enter_second_sync_barrier(_worker_id);
4127       }
4128       // At this point, if we're during the concurrent phase of
4129       // marking, everything has been re-initialized and we're
4130       // ready to restart.
4131     }
4132 
4133     if (_cm->verbose_low()) {
4134       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4135                              "elapsed = %1.2lfms <<<<<<<<<<",
4136                              _worker_id, _time_target_ms, elapsed_time_ms);
4137       if (_cm->has_aborted()) {
4138         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4139                                _worker_id);
4140       }
4141     }
4142   } else {
4143     if (_cm->verbose_low()) {
4144       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4145                              "elapsed = %1.2lfms <<<<<<<<<<",
4146                              _worker_id, _time_target_ms, elapsed_time_ms);
4147     }
4148   }
4149 
4150   _claimed = false;
4151 }
4152 
4153 CMTask::CMTask(uint worker_id,
4154                ConcurrentMark* cm,
4155                size_t* marked_bytes,
4156                BitMap* card_bm,
4157                CMTaskQueue* task_queue,
4158                CMTaskQueueSet* task_queues)
4159   : _g1h(G1CollectedHeap::heap()),
4160     _worker_id(worker_id), _cm(cm),
4161     _claimed(false),
4162     _nextMarkBitMap(NULL), _hash_seed(17),
4163     _task_queue(task_queue),
4164     _task_queues(task_queues),
4165     _cm_oop_closure(NULL),
4166     _marked_bytes_array(marked_bytes),
4167     _card_bm(card_bm) {
4168   guarantee(task_queue != NULL, "invariant");
4169   guarantee(task_queues != NULL, "invariant");
4170 
4171   statsOnly( _clock_due_to_scanning = 0;
4172              _clock_due_to_marking  = 0 );
4173 
4174   _marking_step_diffs_ms.add(0.5);
4175 }
4176 
4177 // These are formatting macros that are used below to ensure
4178 // consistent formatting. The *_H_* versions are used to format the
4179 // header for a particular value and they should be kept consistent
4180 // with the corresponding macro. Also note that most of the macros add
4181 // the necessary white space (as a prefix) which makes them a bit
4182 // easier to compose.
4183 
4184 // All the output lines are prefixed with this string to be able to
4185 // identify them easily in a large log file.
4186 #define G1PPRL_LINE_PREFIX            "###"
4187 
4188 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
4189 #ifdef _LP64
4190 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
4191 #else // _LP64
4192 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
4193 #endif // _LP64
4194 
4195 // For per-region info
4196 #define G1PPRL_TYPE_FORMAT            "   %-4s"
4197 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
4198 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
4199 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
4200 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
4201 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
4202 
4203 // For summary info
4204 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
4205 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
4206 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
4207 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
4208 
4209 G1PrintRegionLivenessInfoClosure::
4210 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4211   : _out(out),
4212     _total_used_bytes(0), _total_capacity_bytes(0),
4213     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4214     _hum_used_bytes(0), _hum_capacity_bytes(0),
4215     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4216     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4217   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4218   MemRegion g1_reserved = g1h->g1_reserved();
4219   double now = os::elapsedTime();
4220 
4221   // Print the header of the output.
4222   _out->cr();
4223   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4224   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4225                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4226                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4227                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4228                  HeapRegion::GrainBytes);
4229   _out->print_cr(G1PPRL_LINE_PREFIX);
4230   _out->print_cr(G1PPRL_LINE_PREFIX
4231                 G1PPRL_TYPE_H_FORMAT
4232                 G1PPRL_ADDR_BASE_H_FORMAT
4233                 G1PPRL_BYTE_H_FORMAT
4234                 G1PPRL_BYTE_H_FORMAT
4235                 G1PPRL_BYTE_H_FORMAT
4236                 G1PPRL_DOUBLE_H_FORMAT
4237                 G1PPRL_BYTE_H_FORMAT
4238                 G1PPRL_BYTE_H_FORMAT,
4239                 "type", "address-range",
4240                 "used", "prev-live", "next-live", "gc-eff",
4241                 "remset", "code-roots");
4242   _out->print_cr(G1PPRL_LINE_PREFIX
4243                 G1PPRL_TYPE_H_FORMAT
4244                 G1PPRL_ADDR_BASE_H_FORMAT
4245                 G1PPRL_BYTE_H_FORMAT
4246                 G1PPRL_BYTE_H_FORMAT
4247                 G1PPRL_BYTE_H_FORMAT
4248                 G1PPRL_DOUBLE_H_FORMAT
4249                 G1PPRL_BYTE_H_FORMAT
4250                 G1PPRL_BYTE_H_FORMAT,
4251                 "", "",
4252                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4253                 "(bytes)", "(bytes)");
4254 }
4255 
4256 // It takes as a parameter a reference to one of the _hum_* fields, it
4257 // deduces the corresponding value for a region in a humongous region
4258 // series (either the region size, or what's left if the _hum_* field
4259 // is < the region size), and updates the _hum_* field accordingly.
4260 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4261   size_t bytes = 0;
4262   // The > 0 check is to deal with the prev and next live bytes which
4263   // could be 0.
4264   if (*hum_bytes > 0) {
4265     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4266     *hum_bytes -= bytes;
4267   }
4268   return bytes;
4269 }
4270 
4271 // It deduces the values for a region in a humongous region series
4272 // from the _hum_* fields and updates those accordingly. It assumes
4273 // that that _hum_* fields have already been set up from the "starts
4274 // humongous" region and we visit the regions in address order.
4275 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4276                                                      size_t* capacity_bytes,
4277                                                      size_t* prev_live_bytes,
4278                                                      size_t* next_live_bytes) {
4279   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4280   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
4281   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
4282   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4283   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4284 }
4285 
4286 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4287   const char* type       = r->get_type_str();
4288   HeapWord* bottom       = r->bottom();
4289   HeapWord* end          = r->end();
4290   size_t capacity_bytes  = r->capacity();
4291   size_t used_bytes      = r->used();
4292   size_t prev_live_bytes = r->live_bytes();
4293   size_t next_live_bytes = r->next_live_bytes();
4294   double gc_eff          = r->gc_efficiency();
4295   size_t remset_bytes    = r->rem_set()->mem_size();
4296   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4297 
4298   if (r->is_starts_humongous()) {
4299     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4300            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4301            "they should have been zeroed after the last time we used them");
4302     // Set up the _hum_* fields.
4303     _hum_capacity_bytes  = capacity_bytes;
4304     _hum_used_bytes      = used_bytes;
4305     _hum_prev_live_bytes = prev_live_bytes;
4306     _hum_next_live_bytes = next_live_bytes;
4307     get_hum_bytes(&used_bytes, &capacity_bytes,
4308                   &prev_live_bytes, &next_live_bytes);
4309     end = bottom + HeapRegion::GrainWords;
4310   } else if (r->is_continues_humongous()) {
4311     get_hum_bytes(&used_bytes, &capacity_bytes,
4312                   &prev_live_bytes, &next_live_bytes);
4313     assert(end == bottom + HeapRegion::GrainWords, "invariant");
4314   }
4315 
4316   _total_used_bytes      += used_bytes;
4317   _total_capacity_bytes  += capacity_bytes;
4318   _total_prev_live_bytes += prev_live_bytes;
4319   _total_next_live_bytes += next_live_bytes;
4320   _total_remset_bytes    += remset_bytes;
4321   _total_strong_code_roots_bytes += strong_code_roots_bytes;
4322 
4323   // Print a line for this particular region.
4324   _out->print_cr(G1PPRL_LINE_PREFIX
4325                  G1PPRL_TYPE_FORMAT
4326                  G1PPRL_ADDR_BASE_FORMAT
4327                  G1PPRL_BYTE_FORMAT
4328                  G1PPRL_BYTE_FORMAT
4329                  G1PPRL_BYTE_FORMAT
4330                  G1PPRL_DOUBLE_FORMAT
4331                  G1PPRL_BYTE_FORMAT
4332                  G1PPRL_BYTE_FORMAT,
4333                  type, p2i(bottom), p2i(end),
4334                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4335                  remset_bytes, strong_code_roots_bytes);
4336 
4337   return false;
4338 }
4339 
4340 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4341   // add static memory usages to remembered set sizes
4342   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4343   // Print the footer of the output.
4344   _out->print_cr(G1PPRL_LINE_PREFIX);
4345   _out->print_cr(G1PPRL_LINE_PREFIX
4346                  " SUMMARY"
4347                  G1PPRL_SUM_MB_FORMAT("capacity")
4348                  G1PPRL_SUM_MB_PERC_FORMAT("used")
4349                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4350                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4351                  G1PPRL_SUM_MB_FORMAT("remset")
4352                  G1PPRL_SUM_MB_FORMAT("code-roots"),
4353                  bytes_to_mb(_total_capacity_bytes),
4354                  bytes_to_mb(_total_used_bytes),
4355                  perc(_total_used_bytes, _total_capacity_bytes),
4356                  bytes_to_mb(_total_prev_live_bytes),
4357                  perc(_total_prev_live_bytes, _total_capacity_bytes),
4358                  bytes_to_mb(_total_next_live_bytes),
4359                  perc(_total_next_live_bytes, _total_capacity_bytes),
4360                  bytes_to_mb(_total_remset_bytes),
4361                  bytes_to_mb(_total_strong_code_roots_bytes));
4362   _out->cr();
4363 }