/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMark.inline.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1ErgoVerbose.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
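  // For example: with the default 8-byte minimum object alignment the
  // bitmap is built with _shifter == 0, so one mark bit covers one heap
  // word and this rounding is a no-op; a coarser alignment would round
  // addr up to the next address that can own a distinct mark bit.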
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}
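// Back-of-the-envelope sizing (assuming the usual 8-byte minimum object
// alignment): mark_distance() is 8 * 8 == 64, so compute_size() reserves
// about heap_size / 64 bytes, i.e. one bitmap byte per 64 heap bytes or
// one mark bit per heap word, for each of the two marking bitmaps.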

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;
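    // On a 64-bit VM this is 1M / 8 == 128K words, so each clearRange()
    // call below clears the bits covering 1MB of heap before the
    // yield/abort conditions are re-checked.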

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer     _hrclaimer;
  bool                  _suspendible; // If the task is suspendible, workers must join the STS.

public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double the capacity; continue with the current stack.
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT "K to " SIZE_FORMAT "K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

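// The global mark stack is only accessed in bulk: tasks push and pop whole
// arrays of oops under ParGCRareEvent_lock. A push that would exceed
// _capacity does not grow the stack; it just sets _overflow, and the
// restart-for-overflow machinery is expected to notice that and recover,
// rather than the pusher retrying.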
void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint  new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

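// Drains the stack by applying "cl" to every oop popped from it. If
// yield_after is set, the drain may stop early after a successful yield
// check and return false so the caller knows to resume draining later;
// otherwise it returns true once the stack is empty.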
template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false),  _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

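// Claims the next survivor region to be scanned as a root region.
// _next_survivor is first tested without the lock as a fast path; the
// claim itself re-reads it under RootRegionScan_lock, so two workers can
// never be handed the same region.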
HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

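// Roughly one concurrent marking thread for every four parallel GC
// threads, but never less than one: e.g. n_par_threads == 8 gives
// (8 + 2) / 4 == 2, while 1 or 2 parallel threads still yield 1.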
uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area */),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area */),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = " PTR_FORMAT ", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
      vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
                                              (double) os::processor_count();
    double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;
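    // Worked example (illustrative numbers only): G1MarkingOverheadPercent
    // == 10, MaxGCPauseMillis == 200, GCPauseIntervalMillis == 1000 and 8
    // processors give overall_cm_overhead == 200 * 0.10 / 1000 == 0.02 and
    // cpu_ratio == 0.125, so marking_thread_num == ceil(0.16) == 1,
    // marking_task_overhead == 0.02 / 1 * 8 == 0.16 and sleep_factor ==
    // 0.84 / 0.16 == 5.25, i.e. the single marking thread sleeps about
    // 5.25x as long as it last ran.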

    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor             = sleep_factor;
    _marking_task_overhead    = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = (uint) ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
                   (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new FlexibleWorkGang("G1 Marker",
       _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);
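  // For instance, a heap reserved at address 0x80000000 with the default
  // 512-byte cards (card_shift == 9) yields a bottom card number of
  // 0x400000; card bitmap indexes are biased by this value so that bit 0
  // of each counting bitmap corresponds to the first card of the heap.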

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread still appears to be
  // in the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow count will decrease at
  // every remark and we'll eventually stop forcing one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }
  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  bool barrier_aborted;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_second_overflow_barrier_sync.enter();
  }

  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial */);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check(worker_id);

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
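            // E.g. with a sleep factor of 5.25 a marking step that
            // consumed 10ms of vtime is followed by a ~52ms sleep,
            // keeping the thread near its target CPU overhead.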
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
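  // Walk the region linearly, prefetching PrefetchScanIntervalInBytes
  // ahead of the cursor; every object between bottom() and top() is
  // iterated, since a root region is always scanned in its entirety.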
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  // Don't set _n_par_threads because it affects MT in process_roots()
  // and the decisions on that MT processing is made elsewhere.
  assert(_parallel_workers->active_workers() > 0, "Should have been set");
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops.  We overflowed.  Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1430 
1431       // Note: if we're looking at the last region in the heap, obj_end
1432       // could actually be just beyond the end of the heap; end_idx
1433       // will then correspond to a (non-existent) card that is also
1434       // just beyond the heap.
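      // As a concrete illustration (assuming the usual 512-byte cards,
      // i.e. 64 HeapWords per card on a 64-bit VM): a 10-word object
      // starting 60 words into card N ends 6 words into card N+1.
      // card_bitmap_index_for(obj_end) yields N+1, and since obj_end is
      // not card aligned the increment below makes end_idx N+2, so the
      // half-open range [start_idx, end_idx) covers exactly cards N
      // and N+1.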
1435       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1436         // end of object is not card aligned - increment to cover
1437         // all the cards spanned by the object
1438         end_idx += 1;
1439       }
1440 
1441       // Set the bits in the card BM for the cards spanned by this object.
1442       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1443 
1444       // Add the size of this object to the number of marked bytes.
1445       marked_bytes += (size_t)obj_sz * HeapWordSize;
1446 
1447       // Find the next marked object after this one.
1448       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1449     }
1450 
1451     // Mark the allocated-since-marking portion...
1452     HeapWord* top = hr->top();
1453     if (ntams < top) {
1454       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1455       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1456 
1457       // Note: if we're looking at the last region in the heap, top
1458       // could actually be just beyond the end of the heap; end_idx
1459       // will then correspond to a (non-existent) card that is also
1460       // just beyond the heap.
1461       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1462         // top is not card aligned - increment to cover
1463         // all the cards spanned by the range [ntams, top)
1464         end_idx += 1;
1465       }
1466       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1467 
1468       // This definitely means the region has live objects.
1469       set_bit_for_region(hr);
1470     }
1471 
1472     // Update the live region bitmap.
1473     if (marked_bytes > 0) {
1474       set_bit_for_region(hr);
1475     }
1476 
1477     // Set the marked bytes for the current region so that
1478     // it can be queried by a calling verification routine
1479     _region_marked_bytes = marked_bytes;
1480 
1481     return false;
1482   }
1483 
1484   size_t region_marked_bytes() const { return _region_marked_bytes; }
1485 };
1486 
1487 // Heap region closure used for verifying the counting data
1488 // that was accumulated concurrently and aggregated during
1489 // the remark pause. This closure is applied to the heap
1490 // regions during the STW cleanup pause.
1491 
1492 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1493   G1CollectedHeap* _g1h;
1494   ConcurrentMark* _cm;
1495   CalcLiveObjectsClosure _calc_cl;
1496   BitMap* _region_bm;   // Region BM to be verified
1497   BitMap* _card_bm;     // Card BM to be verified
1498   bool _verbose;        // verbose output?
1499 
1500   BitMap* _exp_region_bm; // Expected Region BM values
1501   BitMap* _exp_card_bm;   // Expected card BM values
1502 
1503   int _failures;
1504 
1505 public:
1506   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1507                                 BitMap* region_bm,
1508                                 BitMap* card_bm,
1509                                 BitMap* exp_region_bm,
1510                                 BitMap* exp_card_bm,
1511                                 bool verbose) :
1512     _g1h(g1h), _cm(g1h->concurrent_mark()),
1513     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1514     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1515     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1516     _failures(0) { }
1517 
1518   int failures() const { return _failures; }
1519 
1520   bool doHeapRegion(HeapRegion* hr) {
1521     if (hr->is_continues_humongous()) {
1522       // We will ignore these here and process them when their
1523       // associated "starts humongous" region is processed (see
1524       // set_bit_for_region()). Note that we cannot rely on their
1525       // associated "starts humongous" region to have its bit set to
1526       // 1 since, due to the region chunking in the parallel region
1527       // iteration, a "continues humongous" region might be visited
1528       // before its associated "starts humongous".
1529       return false;
1530     }
1531 
1532     int failures = 0;
1533 
1534     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1535     // this region and set the corresponding bits in the expected region
1536     // and card bitmaps.
1537     bool res = _calc_cl.doHeapRegion(hr);
1538     assert(res == false, "should be continuing");
1539 
1540     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1541                     Mutex::_no_safepoint_check_flag);
1542 
1543     // Verify the marked bytes for this region.
1544     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1545     size_t act_marked_bytes = hr->next_marked_bytes();
1546 
1547     // We're not OK if expected marked bytes > actual marked bytes. It means
1548     // we have missed accounting for some objects during the actual marking.
1549     if (exp_marked_bytes > act_marked_bytes) {
1550       if (_verbose) {
1551         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1552                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1553                                hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
1554       }
1555       failures += 1;
1556     }
1557 
1558     // Verify the bit, for this region, in the actual and expected
1559     // (which was just calculated) region bit maps.
1560     // We're not OK if the bit in the calculated expected region
1561     // bitmap is set and the bit in the actual region bitmap is not.
1562     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1563 
1564     bool expected = _exp_region_bm->at(index);
1565     bool actual = _region_bm->at(index);
1566     if (expected && !actual) {
1567       if (_verbose) {
1568         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1569                                "expected: %s, actual: %s",
1570                                hr->hrm_index(),
1571                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1572       }
1573       failures += 1;
1574     }
1575 
1576     // Verify that the card bit maps for the cards spanned by the current
1577     // region match. We have an error if we have a set bit in the expected
1578     // bit map and the corresponding bit in the actual bitmap is not set.
1579 
1580     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1581     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1582 
1583     for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
1584       expected = _exp_card_bm->at(i);
1585       actual = _card_bm->at(i);
1586 
1587       if (expected && !actual) {
1588         if (_verbose) {
1589           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1590                                  "expected: %s, actual: %s",
1591                                  hr->hrm_index(), i,
1592                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1593         }
1594         failures += 1;
1595       }
1596     }
1597 
1598     if (failures > 0 && _verbose)  {
1599       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1600                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1601                              HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
1602                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1603     }
1604 
1605     _failures += failures;
1606 
1607     // We could stop iteration over the heap when we
1608     // find the first violating region by returning true.
1609     return false;
1610   }
1611 };
1612 
1613 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1614 protected:
1615   G1CollectedHeap* _g1h;
1616   ConcurrentMark* _cm;
1617   BitMap* _actual_region_bm;
1618   BitMap* _actual_card_bm;
1619 
1620   uint    _n_workers;
1621 
1622   BitMap* _expected_region_bm;
1623   BitMap* _expected_card_bm;
1624 
1625   int  _failures;
1626   bool _verbose;
1627 
1628   HeapRegionClaimer _hrclaimer;
1629 
1630 public:
1631   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1632                             BitMap* region_bm, BitMap* card_bm,
1633                             BitMap* expected_region_bm, BitMap* expected_card_bm)
1634     : AbstractGangTask("G1 verify final counting"),
1635       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1636       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1637       _n_workers(_g1h->workers()->active_workers()),
1638       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1639       _failures(0), _verbose(false), _hrclaimer(_n_workers) {
1640     assert(VerifyDuringGC, "don't call this otherwise");
1641     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1642     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1643 
1644     _verbose = _cm->verbose_medium();
1645   }
1646 
1647   void work(uint worker_id) {
1648     assert(worker_id < _n_workers, "invariant");
1649 
1650     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1651                                             _actual_region_bm, _actual_card_bm,
1652                                             _expected_region_bm,
1653                                             _expected_card_bm,
1654                                             _verbose);
1655 
1656     _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);
1657 
1658     Atomic::add(verify_cl.failures(), &_failures);
1659   }
1660 
1661   int failures() const { return _failures; }
1662 };
1663 
1664 // Closure that finalizes the liveness counting data.
1665 // Used during the cleanup pause.
1666 // Sets the bits corresponding to the interval [NTAMS, top)
1667 // (which contains the implicitly live objects) in the
1668 // card liveness bitmap. Also sets the bit for each region
1669 // containing live data in the region liveness bitmap.
1670 
1671 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1672  public:
1673   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1674                               BitMap* region_bm,
1675                               BitMap* card_bm) :
1676     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1677 
1678   bool doHeapRegion(HeapRegion* hr) {
1679 
1680     if (hr->is_continues_humongous()) {
1681       // We will ignore these here and process them when their
1682       // associated "starts humongous" region is processed (see
1683       // set_bit_for_region()). Note that we cannot rely on their
1684       // associated "starts humongous" region to have its bit set to
1685       // 1 since, due to the region chunking in the parallel region
1686       // iteration, a "continues humongous" region might be visited
1687       // before its associated "starts humongous".
1688       return false;
1689     }
1690 
1691     HeapWord* ntams = hr->next_top_at_mark_start();
1692     HeapWord* top   = hr->top();
1693 
1694     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1695 
1696     // Mark the allocated-since-marking portion...
1697     if (ntams < top) {
1698       // This definitely means the region has live objects.
1699       set_bit_for_region(hr);
1700 
1701       // Now set the bits in the card bitmap for [ntams, top)
1702       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1703       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1704 
1705       // Note: if we're looking at the last region in the heap, top
1706       // could actually be just beyond the end of the heap; end_idx
1707       // will then correspond to a (non-existent) card that is also
1708       // just beyond the heap.
1709       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1710         // top is not card aligned - increment to cover
1711         // all the cards spanned by the range [ntams, top)
1712         end_idx += 1;
1713       }
1714 
1715       assert(end_idx <= _card_bm->size(),
1716              err_msg("oob: end_idx = " SIZE_FORMAT ", bitmap size = " SIZE_FORMAT,
1717                      end_idx, _card_bm->size()));
1718       assert(start_idx < _card_bm->size(),
1719              err_msg("oob: start_idx = " SIZE_FORMAT ", bitmap size = " SIZE_FORMAT,
1720                      start_idx, _card_bm->size()));
1721 
1722       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1723     }
1724 
1725     // Set the bit for the region if it contains live data
1726     if (hr->next_marked_bytes() > 0) {
1727       set_bit_for_region(hr);
1728     }
1729 
1730     return false;
1731   }
1732 };
1733 
1734 class G1ParFinalCountTask: public AbstractGangTask {
1735 protected:
1736   G1CollectedHeap* _g1h;
1737   ConcurrentMark* _cm;
1738   BitMap* _actual_region_bm;
1739   BitMap* _actual_card_bm;
1740 
1741   uint    _n_workers;
1742   HeapRegionClaimer _hrclaimer;
1743 
1744 public:
1745   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1746     : AbstractGangTask("G1 final counting"),
1747       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1748       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1749       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1750   }
1751 
1752   void work(uint worker_id) {
1753     assert(worker_id < _n_workers, "invariant");
1754 
1755     FinalCountDataUpdateClosure final_update_cl(_g1h,
1756                                                 _actual_region_bm,
1757                                                 _actual_card_bm);
1758 
1759     _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
1760   }
1761 };
1762 
1763 class G1ParNoteEndTask;
1764 
1765 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1766   G1CollectedHeap* _g1;
1767   size_t _max_live_bytes;
1768   uint _regions_claimed;
1769   size_t _freed_bytes;
1770   FreeRegionList* _local_cleanup_list;
1771   HeapRegionSetCount _old_regions_removed;
1772   HeapRegionSetCount _humongous_regions_removed;
1773   HRRSCleanupTask* _hrrs_cleanup_task;
1774   double _claimed_region_time;
1775   double _max_region_time;
1776 
1777 public:
1778   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1779                              FreeRegionList* local_cleanup_list,
1780                              HRRSCleanupTask* hrrs_cleanup_task) :
1781     _g1(g1),
1782     _max_live_bytes(0), _regions_claimed(0),
1783     _freed_bytes(0),
1784     _local_cleanup_list(local_cleanup_list),
1785     _old_regions_removed(),
1786     _humongous_regions_removed(),
1787     _hrrs_cleanup_task(hrrs_cleanup_task),
1788     _claimed_region_time(0.0), _max_region_time(0.0) { }
1789 
1790   size_t freed_bytes() { return _freed_bytes; }
1791   const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1792   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1793 
1794   bool doHeapRegion(HeapRegion *hr) {
1795     if (hr->is_continues_humongous()) {
1796       return false;
1797     }
1798     // Regions are claimed via the HeapRegionClaimer passed to
1799     // heap_region_par_iterate(), so each region is visited exactly once.
1800     _g1->reset_gc_time_stamps(hr);
1801     double start = os::elapsedTime();
1802     _regions_claimed++;
1803     hr->note_end_of_marking();
1804     _max_live_bytes += hr->max_live_bytes();
1805 
1806     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1807       _freed_bytes += hr->used();
1808       hr->set_containing_set(NULL);
1809       if (hr->is_humongous()) {
1810         assert(hr->is_starts_humongous(), "we should only see starts humongous");
1811         _humongous_regions_removed.increment(1u, hr->capacity());
1812         _g1->free_humongous_region(hr, _local_cleanup_list, true);
1813       } else {
1814         _old_regions_removed.increment(1u, hr->capacity());
1815         _g1->free_region(hr, _local_cleanup_list, true);
1816       }
1817     } else {
1818       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1819     }
1820 
1821     double region_time = (os::elapsedTime() - start);
1822     _claimed_region_time += region_time;
1823     if (region_time > _max_region_time) {
1824       _max_region_time = region_time;
1825     }
1826     return false;
1827   }
1828 
1829   size_t max_live_bytes() { return _max_live_bytes; }
1830   uint regions_claimed() { return _regions_claimed; }
1831   double claimed_region_time_sec() { return _claimed_region_time; }
1832   double max_region_time_sec() { return _max_region_time; }
1833 };
1834 
1835 class G1ParNoteEndTask: public AbstractGangTask {
1836   friend class G1NoteEndOfConcMarkClosure;
1837 
1838 protected:
1839   G1CollectedHeap* _g1h;
1840   size_t _max_live_bytes;
1841   size_t _freed_bytes;
1842   FreeRegionList* _cleanup_list;
1843   HeapRegionClaimer _hrclaimer;
1844 
1845 public:
1846   G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1847       AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
1848   }
1849 
1850   void work(uint worker_id) {
1851     FreeRegionList local_cleanup_list("Local Cleanup List");
1852     HRRSCleanupTask hrrs_cleanup_task;
1853     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1854                                            &hrrs_cleanup_task);
1855     _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
1856     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1857 
1858     // Now update the lists
1859     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1860     {
1861       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1862       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1863       _max_live_bytes += g1_note_end.max_live_bytes();
1864       _freed_bytes += g1_note_end.freed_bytes();
1865 
1866       // If we iterate over the global cleanup list at the end of
1867       // cleanup to do this printing we will not guarantee to only
1868       // generate output for the newly-reclaimed regions (the list
1869       // might not be empty at the beginning of cleanup; we might
1870       // still be working on its previous contents). So we do the
1871       // printing here, before we append the new regions to the global
1872       // cleanup list.
1873 
1874       G1HRPrinter* hr_printer = _g1h->hr_printer();
1875       if (hr_printer->is_active()) {
1876         FreeRegionListIterator iter(&local_cleanup_list);
1877         while (iter.more_available()) {
1878           HeapRegion* hr = iter.get_next();
1879           hr_printer->cleanup(hr);
1880         }
1881       }
1882 
1883       _cleanup_list->add_ordered(&local_cleanup_list);
1884       assert(local_cleanup_list.is_empty(), "post-condition");
1885 
1886       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1887     }
1888   }
1889   size_t max_live_bytes() { return _max_live_bytes; }
1890   size_t freed_bytes() { return _freed_bytes; }
1891 };
1892 
1893 class G1ParScrubRemSetTask: public AbstractGangTask {
1894 protected:
1895   G1RemSet* _g1rs;
1896   BitMap* _region_bm;
1897   BitMap* _card_bm;
1898   HeapRegionClaimer _hrclaimer;
1899 
1900 public:
1901   G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
1902       AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) {
1903   }
1904 
1905   void work(uint worker_id) {
1906     _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer);
1907   }
1908 
1909 };
1910 
1911 void ConcurrentMark::cleanup() {
1912   // world is stopped at this checkpoint
1913   assert(SafepointSynchronize::is_at_safepoint(),
1914          "world should be stopped");
1915   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1916 
1917   // If a full collection has happened, we shouldn't do this.
1918   if (has_aborted()) {
1919     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1920     return;
1921   }
1922 
1923   g1h->verify_region_sets_optional();
1924 
1925   if (VerifyDuringGC) {
1926     HandleMark hm;  // handle scope
1927     g1h->prepare_for_verify();
1928     Universe::verify(VerifyOption_G1UsePrevMarking,
1929                      " VerifyDuringGC:(before)");
1930   }
1931   g1h->check_bitmaps("Cleanup Start");
1932 
1933   G1CollectorPolicy* g1p = g1h->g1_policy();
1934   g1p->record_concurrent_mark_cleanup_start();
1935 
1936   double start = os::elapsedTime();
1937 
1938   HeapRegionRemSet::reset_for_cleanup_tasks();
1939 
1940   uint n_workers;
1941 
1942   // Do counting once more with the world stopped for good measure.
1943   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1944 
1945   g1h->set_par_threads();
1946   n_workers = g1h->n_par_threads();
1947   assert(g1h->n_par_threads() == n_workers,
1948          "Should not have been reset");
1949   g1h->workers()->run_task(&g1_par_count_task);
1950   // Done with the parallel phase so reset to 0.
1951   g1h->set_par_threads(0);
1952 
1953   if (VerifyDuringGC) {
1954     // Verify that the counting data accumulated during marking matches
1955     // that calculated by walking the marking bitmap.
1956 
1957     // Bitmaps to hold expected values
1958     BitMap expected_region_bm(_region_bm.size(), true);
1959     BitMap expected_card_bm(_card_bm.size(), true);
1960 
1961     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1962                                                  &_region_bm,
1963                                                  &_card_bm,
1964                                                  &expected_region_bm,
1965                                                  &expected_card_bm);
1966 
1967     g1h->set_par_threads((int)n_workers);
1968     g1h->workers()->run_task(&g1_par_verify_task);
1969     // Done with the parallel phase so reset to 0.
1970     g1h->set_par_threads(0);
1971 
1972     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
1973   }
1974 
1975   size_t start_used_bytes = g1h->used();
1976   g1h->set_marking_complete();
1977 
1978   double count_end = os::elapsedTime();
1979   double this_final_counting_time = (count_end - start);
1980   _total_counting_time += this_final_counting_time;
1981 
1982   if (G1PrintRegionLivenessInfo) {
1983     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
1984     _g1h->heap_region_iterate(&cl);
1985   }
1986 
1987   // Install newly created mark bitMap as "prev".
1988   swapMarkBitMaps();
1989 
1990   g1h->reset_gc_time_stamp();
1991 
1992   // Note end of marking in all heap regions.
1993   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1994   g1h->set_par_threads((int)n_workers);
1995   g1h->workers()->run_task(&g1_par_note_end_task);
1996   g1h->set_par_threads(0);
1997   g1h->check_gc_time_stamps();
1998 
1999   if (!cleanup_list_is_empty()) {
2000     // The cleanup list is not empty, so we'll have to process it
2001     // concurrently. Notify anyone else that might be wanting free
2002     // regions that there will be more free regions coming soon.
2003     g1h->set_free_regions_coming();
2004   }
2005 
2006   // Scrub the rem sets before the record_concurrent_mark_cleanup_end()
2007   // call below, since it affects the metric by which we sort the heap regions.
2008   if (G1ScrubRemSets) {
2009     double rs_scrub_start = os::elapsedTime();
2010     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
2011     g1h->set_par_threads((int)n_workers);
2012     g1h->workers()->run_task(&g1_par_scrub_rs_task);
2013     g1h->set_par_threads(0);
2014 
2015     double rs_scrub_end = os::elapsedTime();
2016     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2017     _total_rs_scrub_time += this_rs_scrub_time;
2018   }
2019 
2020   // this will also free any regions totally full of garbage objects,
2021   // and sort the regions.
2022   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2023 
2024   // Statistics.
2025   double end = os::elapsedTime();
2026   _cleanup_times.add((end - start) * 1000.0);
2027 
2028   if (G1Log::fine()) {
2029     g1h->g1_policy()->print_heap_transition(start_used_bytes);
2030   }
2031 
2032   // Clean up will have freed any regions completely full of garbage.
2033   // Update the soft reference policy with the new heap occupancy.
2034   Universe::update_heap_info_at_gc();
2035 
2036   if (VerifyDuringGC) {
2037     HandleMark hm;  // handle scope
2038     g1h->prepare_for_verify();
2039     Universe::verify(VerifyOption_G1UsePrevMarking,
2040                      " VerifyDuringGC:(after)");
2041   }
2042 
2043   g1h->check_bitmaps("Cleanup End");
2044 
2045   g1h->verify_region_sets_optional();
2046 
2047   // We need to make this be a "collection" so any collection pause that
2048   // races with it goes around and waits for completeCleanup to finish.
2049   g1h->increment_total_collections();
2050 
2051   // Clean out dead classes and update Metaspace sizes.
2052   if (ClassUnloadingWithConcurrentMark) {
2053     ClassLoaderDataGraph::purge();
2054   }
2055   MetaspaceGC::compute_new_size();
2056 
2057   // We reclaimed old regions so we should calculate the sizes to make
2058   // sure we update the old gen/space data.
2059   g1h->g1mm()->update_sizes();
2060   g1h->allocation_context_stats().update_after_mark();
2061 
2062   g1h->trace_heap_after_concurrent_cycle();
2063 }
2064 
2065 void ConcurrentMark::completeCleanup() {
2066   if (has_aborted()) return;
2067 
2068   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2069 
2070   _cleanup_list.verify_optional();
2071   FreeRegionList tmp_free_list("Tmp Free List");
2072 
2073   if (G1ConcRegionFreeingVerbose) {
2074     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2075                            "cleanup list has %u entries",
2076                            _cleanup_list.length());
2077   }
2078 
2079   // No one else should be accessing the _cleanup_list at this point,
2080   // so it is not necessary to take any locks
2081   while (!_cleanup_list.is_empty()) {
2082     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2083     assert(hr != NULL, "Got NULL from a non-empty list");
2084     hr->par_clear();
2085     tmp_free_list.add_ordered(hr);
2086 
2087     // Instead of adding one region at a time to the secondary_free_list,
2088     // we accumulate them in the local list and move them a few at a
2089     // time. This also cuts down on the number of notify_all() calls
2090     // we do during this process. We'll also append the local list when
2091     // _cleanup_list is empty (which means we just removed the last
2092     // region from the _cleanup_list).
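    // For example, if G1SecondaryFreeListAppendLength were 5 (an
    // illustrative value, not necessarily the default), a cleanup
    // list of 12 regions would be appended in three batches: after
    // regions 5 and 10, and a final batch of 2 when the cleanup
    // list becomes empty.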
2093     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2094         _cleanup_list.is_empty()) {
2095       if (G1ConcRegionFreeingVerbose) {
2096         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2097                                "appending %u entries to the secondary_free_list, "
2098                                "cleanup list still has %u entries",
2099                                tmp_free_list.length(),
2100                                _cleanup_list.length());
2101       }
2102 
2103       {
2104         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2105         g1h->secondary_free_list_add(&tmp_free_list);
2106         SecondaryFreeList_lock->notify_all();
2107       }
2108 #ifndef PRODUCT
2109       if (G1StressConcRegionFreeing) {
2110         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2111           os::sleep(Thread::current(), (jlong) 1, false);
2112         }
2113       }
2114 #endif
2115     }
2116   }
2117   assert(tmp_free_list.is_empty(), "post-condition");
2118 }
2119 
2120 // Supporting Object and Oop closures for reference discovery
2121 // and processing during marking
2122 
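     // A sketch of the intended semantics (is_obj_ill() is the
     // authoritative definition): everything outside the G1 reserved
     // heap is treated as alive; inside the heap an object is alive
     // unless it is "ill" with respect to the next marking, i.e. it
     // was allocated before its region's NTAMS and has not been
     // marked on the next bitmap.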
2123 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2124   HeapWord* addr = (HeapWord*)obj;
2125   return addr != NULL &&
2126          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2127 }
2128 
2129 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2130 // Uses the CMTask associated with a worker thread (for serial reference
2131 // processing the CMTask for worker 0 is used) to preserve (mark) and
2132 // trace referent objects.
2133 //
2134 // Using the CMTask and embedded local queues avoids having the worker
2135 // threads operate on the global mark stack. This reduces the risk
2136 // of overflowing the stack - which we would rather avoid at this late
2137 // stage. Also, using the tasks' local queues removes the potential
2138 // for the workers to interfere with each other, which could occur if
2139 // they operated on the global stack.
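     // As a sketch of the resulting cadence (assuming, for
     // illustration, G1RefProcDrainInterval == 1000): after every
     // 1000th call to deal_with_reference() the closure below calls
     // do_marking_step() in a loop to drain the entries pushed so
     // far, then resets its counter and carries on with the next
     // batch of discovered references.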
2140 
2141 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2142   ConcurrentMark* _cm;
2143   CMTask*         _task;
2144   int             _ref_counter_limit;
2145   int             _ref_counter;
2146   bool            _is_serial;
2147  public:
2148   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2149     _cm(cm), _task(task),
2150     _ref_counter_limit(G1RefProcDrainInterval), _is_serial(is_serial) {
2151     assert(_ref_counter_limit > 0, "sanity");
2152     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2153     _ref_counter = _ref_counter_limit;
2154   }
2155 
2156   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2157   virtual void do_oop(      oop* p) { do_oop_work(p); }
2158 
2159   template <class T> void do_oop_work(T* p) {
2160     if (!_cm->has_overflown()) {
2161       oop obj = oopDesc::load_decode_heap_oop(p);
2162       if (_cm->verbose_high()) {
2163         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2164                                "*" PTR_FORMAT " = " PTR_FORMAT,
2165                                _task->worker_id(), p2i(p), p2i((void*) obj));
2166       }
2167 
2168       _task->deal_with_reference(obj);
2169       _ref_counter--;
2170 
2171       if (_ref_counter == 0) {
2172         // We have dealt with _ref_counter_limit references, pushing them
2173         // and objects reachable from them on to the local stack (and
2174         // possibly the global stack). Call CMTask::do_marking_step() to
2175         // process these entries.
2176         //
2177         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2178         // there's nothing more to do (i.e. we're done with the entries that
2179         // were pushed as a result of the CMTask::deal_with_reference() calls
2180         // above) or we overflow.
2181         //
2182         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2183         // flag while there may still be some work to do. (See the comment at
2184         // the beginning of CMTask::do_marking_step() for those conditions -
2185         // one of which is reaching the specified time target.) It is only
2186         // when CMTask::do_marking_step() returns without setting the
2187         // has_aborted() flag that the marking step has completed.
2188         do {
2189           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2190           _task->do_marking_step(mark_step_duration_ms,
2191                                  false      /* do_termination */,
2192                                  _is_serial);
2193         } while (_task->has_aborted() && !_cm->has_overflown());
2194         _ref_counter = _ref_counter_limit;
2195       }
2196     } else {
2197       if (_cm->verbose_high()) {
2198          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2199       }
2200     }
2201   }
2202 };
2203 
2204 // 'Drain' oop closure used by both serial and parallel reference processing.
2205 // Uses the CMTask associated with a given worker thread (for serial
2206 // reference processing the CMTask for worker 0 is used). Calls the
2207 // do_marking_step routine, with an unbelievably large timeout value,
2208 // to drain the marking data structures of the remaining entries
2209 // added by the 'keep alive' oop closure above.
2210 
2211 class G1CMDrainMarkingStackClosure: public VoidClosure {
2212   ConcurrentMark* _cm;
2213   CMTask*         _task;
2214   bool            _is_serial;
2215  public:
2216   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2217     _cm(cm), _task(task), _is_serial(is_serial) {
2218     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2219   }
2220 
2221   void do_void() {
2222     do {
2223       if (_cm->verbose_high()) {
2224         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2225                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2226       }
2227 
2228       // We call CMTask::do_marking_step() to completely drain the local
2229       // and global marking stacks of entries pushed by the 'keep alive'
2230       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2231       //
2232       // CMTask::do_marking_step() is called in a loop, which we'll exit
2233       // if there's nothing more to do (i.e. we've completely drained the
2234       // entries that were pushed as a result of applying the 'keep alive'
2235       // closure to the entries on the discovered ref lists) or we overflow
2236       // the global marking stack.
2237       //
2238       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2239       // flag while there may still be some work to do. (See the comment at
2240       // the beginning of CMTask::do_marking_step() for those conditions -
2241       // one of which is reaching the specified time target.) It is only
2242       // when CMTask::do_marking_step() returns without setting the
2243       // has_aborted() flag that the marking step has completed.
2244 
2245       _task->do_marking_step(1000000000.0 /* something very large */,
2246                              true         /* do_termination */,
2247                              _is_serial);
2248     } while (_task->has_aborted() && !_cm->has_overflown());
2249   }
2250 };
2251 
2252 // Implementation of AbstractRefProcTaskExecutor for parallel
2253 // reference processing at the end of G1 concurrent marking
2254 
2255 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2256 private:
2257   G1CollectedHeap* _g1h;
2258   ConcurrentMark*  _cm;
2259   WorkGang*        _workers;
2260   uint             _active_workers;
2261 
2262 public:
2263   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2264                           ConcurrentMark* cm,
2265                           WorkGang* workers,
2266                           uint n_workers) :
2267     _g1h(g1h), _cm(cm),
2268     _workers(workers), _active_workers(n_workers) { }
2269 
2270   // Executes the given task using concurrent marking worker threads.
2271   virtual void execute(ProcessTask& task);
2272   virtual void execute(EnqueueTask& task);
2273 };
2274 
2275 class G1CMRefProcTaskProxy: public AbstractGangTask {
2276   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2277   ProcessTask&     _proc_task;
2278   G1CollectedHeap* _g1h;
2279   ConcurrentMark*  _cm;
2280 
2281 public:
2282   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2283                      G1CollectedHeap* g1h,
2284                      ConcurrentMark* cm) :
2285     AbstractGangTask("Process reference objects in parallel"),
2286     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2287     ReferenceProcessor* rp = _g1h->ref_processor_cm();
2288     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2289   }
2290 
2291   virtual void work(uint worker_id) {
2292     ResourceMark rm;
2293     HandleMark hm;
2294     CMTask* task = _cm->task(worker_id);
2295     G1CMIsAliveClosure g1_is_alive(_g1h);
2296     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2297     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2298 
2299     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2300   }
2301 };
2302 
2303 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2304   assert(_workers != NULL, "Need parallel worker threads.");
2305   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2306 
2307   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2308 
2309   // We need to reset the concurrency level before each
2310   // proxy task execution, so that the termination protocol
2311   // and overflow handling in CMTask::do_marking_step() knows
2312   // how many workers to wait for.
2313   _cm->set_concurrency(_active_workers);
2314   _g1h->set_par_threads(_active_workers);
2315   _workers->run_task(&proc_task_proxy);
2316   _g1h->set_par_threads(0);
2317 }
2318 
2319 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2320   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2321   EnqueueTask& _enq_task;
2322 
2323 public:
2324   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2325     AbstractGangTask("Enqueue reference objects in parallel"),
2326     _enq_task(enq_task) { }
2327 
2328   virtual void work(uint worker_id) {
2329     _enq_task.work(worker_id);
2330   }
2331 };
2332 
2333 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2334   assert(_workers != NULL, "Need parallel worker threads.");
2335   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2336 
2337   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2338 
2339   // Not strictly necessary but...
2340   //
2341   // We need to reset the concurrency level before each
2342   // proxy task execution, so that the termination protocol
2343   // and overflow handling in CMTask::do_marking_step() knows
2344   // how many workers to wait for.
2345   _cm->set_concurrency(_active_workers);
2346   _g1h->set_par_threads(_active_workers);
2347   _workers->run_task(&enq_task_proxy);
2348   _g1h->set_par_threads(0);
2349 }
2350 
2351 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2352   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2353 }
2354 
2355 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2356   if (has_overflown()) {
2357     // Skip processing the discovered references if we have
2358     // overflown the global marking stack. Reference objects
2359     // only get discovered once so it is OK to not
2360     // de-populate the discovered reference lists. We could have,
2361     // but the only benefit would be that, when marking restarts,
2362     // fewer reference objects are discovered.
2363     return;
2364   }
2365 
2366   ResourceMark rm;
2367   HandleMark   hm;
2368 
2369   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2370 
2371   // Is alive closure.
2372   G1CMIsAliveClosure g1_is_alive(g1h);
2373 
2374   // Inner scope to exclude the cleaning of the string and symbol
2375   // tables from the displayed time.
2376   {
2377     G1CMTraceTime t("GC ref-proc", G1Log::finer());
2378 
2379     ReferenceProcessor* rp = g1h->ref_processor_cm();
2380 
2381     // See the comment in G1CollectedHeap::ref_processing_init()
2382     // about how reference processing currently works in G1.
2383 
2384     // Set the soft reference policy
2385     rp->setup_policy(clear_all_soft_refs);
2386     assert(_markStack.isEmpty(), "mark stack should be empty");
2387 
2388     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2389     // in serial reference processing. Note these closures are also
2390     // used for serially processing (by the current thread) the
2391     // JNI references during parallel reference processing.
2392     //
2393     // These closures do not need to synchronize with the worker
2394     // threads involved in parallel reference processing as these
2395     // instances are executed serially by the current thread (e.g.
2396     // reference processing is not multi-threaded and is thus
2397     // performed by the current thread instead of a gang worker).
2398     //
2399     // The gang tasks involved in parallel reference processing create
2400     // their own instances of these closures, which do their own
2401     // synchronization among themselves.
2402     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2403     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2404 
2405     // We need at least one active thread. If reference processing
2406     // is not multi-threaded we use the current (VMThread) thread,
2407     // otherwise we use the work gang from the G1CollectedHeap and
2408     // we utilize all the worker threads we can.
2409     bool processing_is_mt = rp->processing_is_mt();
2410     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2411     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2412 
2413     // Parallel processing task executor.
2414     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2415                                               g1h->workers(), active_workers);
2416     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2417 
2418     // Set the concurrency level. The phase was already set prior to
2419     // executing the remark task.
2420     set_concurrency(active_workers);
2421 
2422     // Set the degree of MT processing here.  If the discovery was done MT,
2423     // the number of threads involved during discovery could differ from
2424     // the number of active workers.  This is OK as long as the discovered
2425     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2426     rp->set_active_mt_degree(active_workers);
2427 
2428     // Process the weak references.
2429     const ReferenceProcessorStats& stats =
2430         rp->process_discovered_references(&g1_is_alive,
2431                                           &g1_keep_alive,
2432                                           &g1_drain_mark_stack,
2433                                           executor,
2434                                           g1h->gc_timer_cm(),
2435                                           concurrent_gc_id());
2436     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2437 
2438     // The do_oop work routines of the keep_alive and drain_marking_stack
2439     // oop closures will set the has_overflown flag if we overflow the
2440     // global marking stack.
2441 
2442     assert(_markStack.overflow() || _markStack.isEmpty(),
2443             "mark stack should be empty (unless it overflowed)");
2444 
2445     if (_markStack.overflow()) {
2446       // This should have been done already when we tried to push an
2447       // entry on to the global mark stack. But let's do it again.
2448       set_has_overflown();
2449     }
2450 
2451     assert(rp->num_q() == active_workers, "Reference queue count should match the number of active workers");
2452 
2453     rp->enqueue_discovered_references(executor);
2454 
2455     rp->verify_no_references_recorded();
2456     assert(!rp->discovery_enabled(), "Post condition");
2457   }
2458 
2459   if (has_overflown()) {
2460     // We can not trust g1_is_alive if the marking stack overflowed
2461     return;
2462   }
2463 
2464   assert(_markStack.isEmpty(), "Marking should have completed");
2465 
2466   // Unload Klasses, String, Symbols, Code Cache, etc.
2467   {
2468     G1CMTraceTime trace("Unloading", G1Log::finer());
2469 
2470     if (ClassUnloadingWithConcurrentMark) {
2471       bool purged_classes;
2472 
2473       {
2474         G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
2475         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2476       }
2477 
2478       {
2479         G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
2480         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2481       }
2482     }
2483 
2484     if (G1StringDedup::is_enabled()) {
2485       G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
2486       G1StringDedup::unlink(&g1_is_alive);
2487     }
2488   }
2489 }
2490 
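     // The "next" bitmap now holds the results of the just-completed
     // marking and becomes the "prev" bitmap; the casts merely add and
     // remove the read-only view of the prev bitmap.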
2491 void ConcurrentMark::swapMarkBitMaps() {
2492   CMBitMapRO* temp = _prevMarkBitMap;
2493   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2494   _nextMarkBitMap  = (CMBitMap*)  temp;
2495 }
2496 
2497 // Closure for marking entries in SATB buffers.
2498 class CMSATBBufferClosure : public SATBBufferClosure {
2499 private:
2500   CMTask* _task;
2501   G1CollectedHeap* _g1h;
2502 
2503   // This is very similar to CMTask::deal_with_reference, but with
2504   // more relaxed requirements for the argument, so this must be more
2505   // circumspect about treating the argument as an object.
2506   void do_entry(void* entry) const {
2507     _task->increment_refs_reached();
2508     HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
2509     if (entry < hr->next_top_at_mark_start()) {
2510       // Until we get here, we don't know whether entry refers to a valid
2511       // object; it could instead have been a stale reference.
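      // One way (an illustrative scenario) such a stale entry arises:
      // a region is freed and re-allocated while marking is in
      // progress, leaving a buffered pointer into its old contents.
      // A freshly allocated region has NTAMS == bottom, so the check
      // above filters such entries out before we treat them as oops.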
2512       oop obj = static_cast<oop>(entry);
2513       assert(obj->is_oop(true /* ignore mark word */),
2514              err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
2515       _task->make_reference_grey(obj, hr);
2516     }
2517   }
2518 
2519 public:
2520   CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
2521     : _task(task), _g1h(g1h) { }
2522 
2523   virtual void do_buffer(void** buffer, size_t size) {
2524     for (size_t i = 0; i < size; ++i) {
2525       do_entry(buffer[i]);
2526     }
2527   }
2528 };
2529 
2530 class G1RemarkThreadsClosure : public ThreadClosure {
2531   CMSATBBufferClosure _cm_satb_cl;
2532   G1CMOopClosure _cm_cl;
2533   MarkingCodeBlobClosure _code_cl;
2534   int _thread_parity;
2535 
2536  public:
2537   G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
2538     _cm_satb_cl(task, g1h),
2539     _cm_cl(g1h, g1h->concurrent_mark(), task),
2540     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2541     _thread_parity(Threads::thread_claim_parity()) {}
2542 
2543   void do_thread(Thread* thread) {
2544     if (thread->is_Java_thread()) {
2545       if (thread->claim_oops_do(true, _thread_parity)) {
2546         JavaThread* jt = (JavaThread*)thread;
2547 
2548         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2549         // however, oops reachable from nmethods have very complex lifecycles:
2550         // * Alive if on the stack of an executing method
2551         // * Weakly reachable otherwise
2552         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2553         // kept live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2554         jt->nmethods_do(&_code_cl);
2555 
2556         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
2557       }
2558     } else if (thread->is_VM_thread()) {
2559       if (thread->claim_oops_do(true, _thread_parity)) {
2560         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
2561       }
2562     }
2563   }
2564 };
2565 
2566 class CMRemarkTask: public AbstractGangTask {
2567 private:
2568   ConcurrentMark* _cm;
2569 public:
2570   void work(uint worker_id) {
2571     // Since all available tasks are actually started, we should
2572     // only proceed if we're supposed to be active.
2573     if (worker_id < _cm->active_tasks()) {
2574       CMTask* task = _cm->task(worker_id);
2575       task->record_start_time();
2576       {
2577         ResourceMark rm;
2578         HandleMark hm;
2579 
2580         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2581         Threads::threads_do(&threads_f);
2582       }
2583 
2584       do {
2585         task->do_marking_step(1000000000.0 /* something very large */,
2586                               true         /* do_termination       */,
2587                               false        /* is_serial            */);
2588       } while (task->has_aborted() && !_cm->has_overflown());
2589       // If we overflow, then we do not want to restart. We instead
2590       // want to abort remark and do concurrent marking again.
2591       task->record_end_time();
2592     }
2593   }
2594 
2595   CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
2596     AbstractGangTask("Par Remark"), _cm(cm) {
2597     _cm->terminator()->reset_for_reuse(active_workers);
2598   }
2599 };
2600 
2601 void ConcurrentMark::checkpointRootsFinalWork() {
2602   ResourceMark rm;
2603   HandleMark   hm;
2604   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2605 
2606   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2607 
2608   g1h->ensure_parsability(false);
2609 
2610   // this is remark, so we'll use up all active threads
2611   uint active_workers = g1h->workers()->active_workers();
2612   if (active_workers == 0) {
2613     assert(active_workers > 0, "Should have been set earlier");
2614     active_workers = (uint) ParallelGCThreads;
2615     g1h->workers()->set_active_workers(active_workers);
2616   }
2617   set_concurrency_and_phase(active_workers, false /* concurrent */);
2618   // Leave _parallel_marking_threads at its
2619   // value originally calculated in the ConcurrentMark
2620   // constructor and pass values of the active workers
2621   // through the gang in the task.
2622 
2623   {
2624     StrongRootsScope srs(active_workers);
2625 
2626     CMRemarkTask remarkTask(this, active_workers);
2627     // We will start all available threads, even if we decide that the
2628     // active_workers will be fewer. The extra ones will just bail out
2629     // immediately.
2630     g1h->set_par_threads(active_workers);
2631     g1h->workers()->run_task(&remarkTask);
2632     g1h->set_par_threads(0);
2633   }
2634 
2635   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2636   guarantee(has_overflown() ||
2637             satb_mq_set.completed_buffers_num() == 0,
2638             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2639                     BOOL_TO_STR(has_overflown()),
2640                     satb_mq_set.completed_buffers_num()));
2641 
2642   print_stats();
2643 }
2644 
2645 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2646   // Note we are overriding the read-only view of the prev map here, via
2647   // the cast.
2648   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2649 }
2650 
2651 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2652   _nextMarkBitMap->clearRange(mr);
2653 }
2654 
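     // Claims the next region to scan by advancing the global finger with
     // a CAS. Returns the claimed region if it has anything to scan (i.e.
     // its limit is above bottom); returns NULL if the claimed region was
     // empty (the caller should simply retry) or the finger has reached
     // the end of the heap.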
2655 HeapRegion*
2656 ConcurrentMark::claim_region(uint worker_id) {
2657   // "checkpoint" the finger
2658   HeapWord* finger = _finger;
2659 
2660   // _heap_end will not change underneath our feet; it only changes at
2661   // yield points.
2662   while (finger < _heap_end) {
2663     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2664 
2665     // Note on how this code handles humongous regions. In the
2666     // normal case the finger will reach the start of a "starts
2667     // humongous" (SH) region. Its end will either be the end of the
2668     // last "continues humongous" (CH) region in the sequence, or the
2669     // standard end of the SH region (if the SH is the only region in
2670     // the sequence). That way claim_region() will skip over the CH
2671     // regions. However, there is a subtle race between a CM thread
2672     // executing this method and a mutator thread doing a humongous
2673     // object allocation. The two are not mutually exclusive as the CM
2674     // thread does not need to hold the Heap_lock when it gets
2675     // here. So there is a chance that claim_region() will come across
2676     // a free region that's in the progress of becoming a SH or a CH
2677     // region. In the former case, it will either
2678     //   a) Miss the update to the region's end, in which case it will
2679     //      visit every subsequent CH region, will find their bitmaps
2680     //      empty, and do nothing, or
2681     //   b) Will observe the update of the region's end (in which case
2682     //      it will skip the subsequent CH regions).
2683     // If it comes across a region that suddenly becomes CH, the
2684     // scenario will be similar to b). So, the race between
2685     // claim_region() and a humongous object allocation might force us
2686     // to do a bit of unnecessary work (due to some unnecessary bitmap
2687     // iterations) but it should not introduce and correctness issues.
2688     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2689 
2690     // heap_region_containing_raw() above may return NULL, as we always
2691     // claim up to the end of the heap. In that case, jump to the next region.
2692     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2693 
2694     // Is the gap between reading the finger and doing the CAS too long?
2695     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2696     if (res == finger && curr_region != NULL) {
2697       // we succeeded
2698       HeapWord*   bottom        = curr_region->bottom();
2699       HeapWord*   limit         = curr_region->next_top_at_mark_start();
2700 
2701       if (verbose_low()) {
2702         gclog_or_tty->print_cr("[%u] curr_region = " PTR_FORMAT " "
2703                                "[" PTR_FORMAT ", " PTR_FORMAT "), "
2704                                "limit = " PTR_FORMAT,
2705                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2706       }
2707 
2708       // notice that _finger == end cannot be guaranteed here since
2709       // someone else might have moved the finger even further
2710       assert(_finger >= end, "the finger should have moved forward");
2711 
2712       if (verbose_low()) {
2713         gclog_or_tty->print_cr("[%u] we were successful with region = "
2714                                PTR_FORMAT, worker_id, p2i(curr_region));
2715       }
2716 
2717       if (limit > bottom) {
2718         if (verbose_low()) {
2719           gclog_or_tty->print_cr("[%u] region " PTR_FORMAT " is not empty, "
2720                                  "returning it ", worker_id, p2i(curr_region));
2721         }
2722         return curr_region;
2723       } else {
2724         assert(limit == bottom,
2725                "the region limit should be at bottom");
2726         if (verbose_low()) {
2727           gclog_or_tty->print_cr("[%u] region " PTR_FORMAT " is empty, "
2728                                  "returning NULL", worker_id, p2i(curr_region));
2729         }
2730         // we return NULL and the caller should try calling
2731         // claim_region() again.
2732         return NULL;
2733       }
2734     } else {
2735       assert(_finger > finger, "the finger should have moved forward");
2736       if (verbose_low()) {
2737         if (curr_region == NULL) {
2738           gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
2739                                  "global finger = " PTR_FORMAT ", "
2740                                  "our finger = " PTR_FORMAT,
2741                                  worker_id, p2i(_finger), p2i(finger));
2742         } else {
2743           gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2744                                  "global finger = " PTR_FORMAT ", "
2745                                  "our finger = " PTR_FORMAT,
2746                                  worker_id, p2i(_finger), p2i(finger));
2747         }
2748       }
2749 
2750       // read it again
2751       finger = _finger;
2752     }
2753   }
2754 
2755   return NULL;
2756 }
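
     // Illustrative usage sketch (not part of the original file): since
     // claim_region() returns NULL both when it claimed an empty region
     // (retry) and when no regions remain, callers loop and consult
     // out_of_regions() to tell the two cases apart, roughly:
     //
     //   HeapRegion* hr = NULL;
     //   while (!has_aborted() && hr == NULL && !_cm->out_of_regions()) {
     //     hr = _cm->claim_region(_worker_id);
     //   }
     //
     // See the region-claiming loop in CMTask::do_marking_step() below.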
2757 
2758 #ifndef PRODUCT
2759 enum VerifyNoCSetOopsPhase {
2760   VerifyNoCSetOopsStack,
2761   VerifyNoCSetOopsQueues
2762 };
2763 
2764 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
2765 private:
2766   G1CollectedHeap* _g1h;
2767   VerifyNoCSetOopsPhase _phase;
2768   int _info;
2769 
2770   const char* phase_str() {
2771     switch (_phase) {
2772     case VerifyNoCSetOopsStack:         return "Stack";
2773     case VerifyNoCSetOopsQueues:        return "Queue";
2774     default:                            ShouldNotReachHere();
2775     }
2776     return NULL;
2777   }
2778 
2779   void do_object_work(oop obj) {
2780     guarantee(!_g1h->obj_in_cs(obj),
2781               err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
2782                       p2i((void*) obj), phase_str(), _info));
2783   }
2784 
2785 public:
2786   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2787 
2788   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
2789     _phase = phase;
2790     _info = info;
2791   }
2792 
2793   virtual void do_oop(oop* p) {
2794     oop obj = oopDesc::load_decode_heap_oop(p);
2795     do_object_work(obj);
2796   }
2797 
2798   virtual void do_oop(narrowOop* p) {
2799     // We should not come across narrow oops while scanning marking
2800     // stacks
2801     ShouldNotReachHere();
2802   }
2803 
2804   virtual void do_object(oop obj) {
2805     do_object_work(obj);
2806   }
2807 };
2808 
2809 void ConcurrentMark::verify_no_cset_oops() {
2810   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2811   if (!G1CollectedHeap::heap()->mark_in_progress()) {
2812     return;
2813   }
2814 
2815   VerifyNoCSetOopsClosure cl;
2816 
2817   // Verify entries on the global mark stack
2818   cl.set_phase(VerifyNoCSetOopsStack);
2819   _markStack.oops_do(&cl);
2820 
2821   // Verify entries on the task queues
2822   for (uint i = 0; i < _max_worker_id; i += 1) {
2823     cl.set_phase(VerifyNoCSetOopsQueues, i);
2824     CMTaskQueue* queue = _task_queues->queue(i);
2825     queue->oops_do(&cl);
2826   }
2827 
2828   // Verify the global finger
2829   HeapWord* global_finger = finger();
2830   if (global_finger != NULL && global_finger < _heap_end) {
2831     // The global finger always points to a heap region boundary. We
2832     // use heap_region_containing_raw() to get the containing region
2833     // given that the global finger could be pointing to a free region
2834     // which subsequently becomes continues humongous. If that
2835     // happens, heap_region_containing() will return the bottom of the
2836     // corresponding starts humongous region and the check below will
2837     // not hold any more.
2838     // Since we always iterate over all regions, we might get a NULL HeapRegion
2839     // here.
2840     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2841     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2842               err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
2843                       p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
2844   }
2845 
2846   // Verify the task fingers
2847   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2848   for (uint i = 0; i < parallel_marking_threads(); i += 1) {
2849     CMTask* task = _tasks[i];
2850     HeapWord* task_finger = task->finger();
2851     if (task_finger != NULL && task_finger < _heap_end) {
2852       // See above note on the global finger verification.
2853       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2854       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2855                 !task_hr->in_collection_set(),
2856                 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
2857                         p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
2858     }
2859   }
2860 }
2861 #endif // PRODUCT
2862 
2863 // Aggregate the counting data that was constructed concurrently
2864 // with marking.
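     // Each worker has accumulated, per heap region, a marked-bytes count
     // (indexed by the region's hrm_index) and a private card bitmap; this
     // closure sums those per-worker "stripes" into the region's marked
     // bytes and ORs the per-worker card bitmaps into the global card bitmap.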
2865 class AggregateCountDataHRClosure: public HeapRegionClosure {
2866   G1CollectedHeap* _g1h;
2867   ConcurrentMark* _cm;
2868   CardTableModRefBS* _ct_bs;
2869   BitMap* _cm_card_bm;
2870   uint _max_worker_id;
2871 
2872  public:
2873   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2874                               BitMap* cm_card_bm,
2875                               uint max_worker_id) :
2876     _g1h(g1h), _cm(g1h->concurrent_mark()),
2877     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
2878     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2879 
2880   bool doHeapRegion(HeapRegion* hr) {
2881     if (hr->is_continues_humongous()) {
2882       // We will ignore these here and process them when their
2883       // associated "starts humongous" region is processed.
2884       // Note that we cannot rely on their associated
2885       // "starts humongous" region to have their bit set to 1
2886       // since, due to the region chunking in the parallel region
2887       // iteration, a "continues humongous" region might be visited
2888       // before its associated "starts humongous".
2889       return false;
2890     }
2891 
2892     HeapWord* start = hr->bottom();
2893     HeapWord* limit = hr->next_top_at_mark_start();
2894     HeapWord* end = hr->end();
2895 
2896     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2897            err_msg("Preconditions not met - "
2898                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
2899                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
2900                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
2901 
2902     assert(hr->next_marked_bytes() == 0, "Precondition");
2903 
2904     if (start == limit) {
2905       // NTAMS of this region has not been set so nothing to do.
2906       return false;
2907     }
2908 
2909     // 'start' should be in the heap.
2910     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2911     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2912     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2913 
2914     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2915     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2916     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2917 
2918     // If ntams is not card aligned then we bump the card bitmap index
2919     // for limit so that we get all the cards spanned by
2920     // the object ending at ntams.
2921     // Note: if this is the last region in the heap then ntams
2922     // could actually be just beyond the end of the heap;
2923     // limit_idx will then correspond to a (non-existent) card
2924     // that is also outside the heap.
2925     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
2926       limit_idx += 1;
2927     }
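
         // Worked example (illustrative, assuming the usual 512-byte cards):
         // if limit (ntams) falls 100 bytes into card k, then
         // card_bitmap_index_for(limit) returns k, yet the object ending at
         // ntams partially spans card k, so we bump the exclusive upper
         // bound to k + 1 to cover that card as well.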
2928 
2929     assert(limit_idx <= end_idx, "or else use atomics");
2930 
2931     // Aggregate the "stripe" in the count data associated with hr.
2932     uint hrm_index = hr->hrm_index();
2933     size_t marked_bytes = 0;
2934 
2935     for (uint i = 0; i < _max_worker_id; i += 1) {
2936       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
2937       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
2938 
2939       // Fetch the marked_bytes in this region for task i and
2940       // add it to the running total for this region.
2941       marked_bytes += marked_bytes_array[hrm_index];
2942 
2943       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
2944       // into the global card bitmap.
2945       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
2946 
2947       while (scan_idx < limit_idx) {
2948         assert(task_card_bm->at(scan_idx) == true, "should be");
2949         _cm_card_bm->set_bit(scan_idx);
2950         assert(_cm_card_bm->at(scan_idx) == true, "should be");
2951 
2952         // BitMap::get_next_one_offset() can handle the case when
2953         // its left_offset parameter is greater than its right_offset
2954         // parameter. It does, however, have an early exit if
2955         // left_offset == right_offset. So let's limit the value
2956         // passed in for left offset here.
2957         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
2958         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
2959       }
2960     }
2961 
2962     // Update the marked bytes for this region.
2963     hr->add_to_marked_bytes(marked_bytes);
2964 
2965     // Next heap region
2966     return false;
2967   }
2968 };
2969 
2970 class G1AggregateCountDataTask: public AbstractGangTask {
2971 protected:
2972   G1CollectedHeap* _g1h;
2973   ConcurrentMark* _cm;
2974   BitMap* _cm_card_bm;
2975   uint _max_worker_id;
2976   uint _active_workers;
2977   HeapRegionClaimer _hrclaimer;
2978 
2979 public:
2980   G1AggregateCountDataTask(G1CollectedHeap* g1h,
2981                            ConcurrentMark* cm,
2982                            BitMap* cm_card_bm,
2983                            uint max_worker_id,
2984                            uint n_workers) :
2985       AbstractGangTask("Count Aggregation"),
2986       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
2987       _max_worker_id(max_worker_id),
2988       _active_workers(n_workers),
2989       _hrclaimer(_active_workers) {
2990   }
2991 
2992   void work(uint worker_id) {
2993     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
2994 
2995     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
2996   }
2997 };
2998 
2999 
3000 void ConcurrentMark::aggregate_count_data() {
3001   uint n_workers = _g1h->workers()->active_workers();
3002 
3003   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3004                                            _max_worker_id, n_workers);
3005 
3006   _g1h->set_par_threads(n_workers);
3007   _g1h->workers()->run_task(&g1_par_agg_task);
3008   _g1h->set_par_threads(0);
3009 }
3010 
3011 // Clear the per-worker arrays used to store the per-region counting data
3012 void ConcurrentMark::clear_all_count_data() {
3013   // Clear the global card bitmap - it will be filled during
3014   // liveness count aggregation (during remark) and the
3015   // final counting task.
3016   _card_bm.clear();
3017 
3018   // Clear the global region bitmap - it will be filled as part
3019   // of the final counting task.
3020   _region_bm.clear();
3021 
3022   uint max_regions = _g1h->max_regions();
3023   assert(_max_worker_id > 0, "uninitialized");
3024 
3025   for (uint i = 0; i < _max_worker_id; i += 1) {
3026     BitMap* task_card_bm = count_card_bitmap_for(i);
3027     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3028 
3029     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3030     assert(marked_bytes_array != NULL, "uninitialized");
3031 
3032     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3033     task_card_bm->clear();
3034   }
3035 }
3036 
3037 void ConcurrentMark::print_stats() {
3038   if (verbose_stats()) {
3039     gclog_or_tty->print_cr("---------------------------------------------------------------------");
3040     for (size_t i = 0; i < _active_tasks; ++i) {
3041       _tasks[i]->print_stats();
3042       gclog_or_tty->print_cr("---------------------------------------------------------------------");
3043     }
3044   }
3045 }
3046 
3047 // abandon current marking iteration due to a Full GC
3048 void ConcurrentMark::abort() {
3049   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
3050   // concurrent bitmap clearing.
3051   _nextMarkBitMap->clearAll();
3052 
3053   // Note we cannot clear the previous marking bitmap here
3054   // since VerifyDuringGC verifies the objects marked during
3055   // a full GC against the previous bitmap.
3056 
3057   // Clear the liveness counting data
3058   clear_all_count_data();
3059   // Empty mark stack
3060   reset_marking_state();
3061   for (uint i = 0; i < _max_worker_id; ++i) {
3062     _tasks[i]->clear_region_fields();
3063   }
3064   _first_overflow_barrier_sync.abort();
3065   _second_overflow_barrier_sync.abort();
3066   const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
3067   if (!gc_id.is_undefined()) {
3068     // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
3069     // to detect that it was aborted. Only keep track of the first GC id that we aborted.
3070     _aborted_gc_id = gc_id;
3071   }
3072   _has_aborted = true;
3073 
3074   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3075   satb_mq_set.abandon_partial_marking();
3076   // This can be called either during or outside marking; we'll read
3077   // the expected_active value from the SATB queue set.
3078   satb_mq_set.set_active_all_threads(
3079                                  false, /* new active value */
3080                                  satb_mq_set.is_active() /* expected_active */);
3081 
3082   _g1h->trace_heap_after_concurrent_cycle();
3083   _g1h->register_concurrent_cycle_end();
3084 }
3085 
3086 const GCId& ConcurrentMark::concurrent_gc_id() {
3087   if (has_aborted()) {
3088     return _aborted_gc_id;
3089   }
3090   return _g1h->gc_tracer_cm()->gc_id();
3091 }
3092 
3093 static void print_ms_time_info(const char* prefix, const char* name,
3094                                NumberSeq& ns) {
3095   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3096                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3097   if (ns.num() > 0) {
3098     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3099                            prefix, ns.sd(), ns.maximum());
3100   }
3101 }
3102 
3103 void ConcurrentMark::print_summary_info() {
3104   gclog_or_tty->print_cr(" Concurrent marking:");
3105   print_ms_time_info("  ", "init marks", _init_times);
3106   print_ms_time_info("  ", "remarks", _remark_times);
3107   {
3108     print_ms_time_info("     ", "final marks", _remark_mark_times);
3109     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3110 
3111   }
3112   print_ms_time_info("  ", "cleanups", _cleanup_times);
3113   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
3114                          _total_counting_time,
3115                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3116                           (double)_cleanup_times.num()
3117                          : 0.0));
3118   if (G1ScrubRemSets) {
3119     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3120                            _total_rs_scrub_time,
3121                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3122                             (double)_cleanup_times.num()
3123                            : 0.0));
3124   }
3125   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3126                          (_init_times.sum() + _remark_times.sum() +
3127                           _cleanup_times.sum())/1000.0);
3128   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3129                 "(%8.2f s marking).",
3130                 cmThread()->vtime_accum(),
3131                 cmThread()->vtime_mark_accum());
3132 }
3133 
3134 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3135   _parallel_workers->print_worker_threads_on(st);
3136 }
3137 
3138 void ConcurrentMark::print_on_error(outputStream* st) const {
3139   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3140       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3141   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3142   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3143 }
3144 
3145 // We take a break if someone is trying to stop the world.
3146 bool ConcurrentMark::do_yield_check(uint worker_id) {
3147   if (SuspendibleThreadSet::should_yield()) {
3148     if (worker_id == 0) {
3149       _g1h->g1_policy()->record_concurrent_pause();
3150     }
3151     SuspendibleThreadSet::yield();
3152     return true;
3153   } else {
3154     return false;
3155   }
3156 }
3157 
3158 #ifndef PRODUCT
3159 // for debugging purposes
3160 void ConcurrentMark::print_finger() {
3161   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3162                          p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3163   for (uint i = 0; i < _max_worker_id; ++i) {
3164     gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3165   }
3166   gclog_or_tty->cr();
3167 }
3168 #endif
3169 
3170 template<bool scan>
3171 inline void CMTask::process_grey_object(oop obj) {
3172   assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
3173   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3174 
3175   if (_cm->verbose_high()) {
3176     gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
3177                            _worker_id, p2i((void*) obj));
3178   }
3179 
3180   size_t obj_size = obj->size();
3181   _words_scanned += obj_size;
3182 
3183   if (scan) {
3184     obj->oop_iterate(_cm_oop_closure);
3185   }
3186   statsOnly( ++_objs_scanned );
3187   check_limits();
3188 }
3189 
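     // Explicit instantiations: <true> iterates over the object's fields
     // with the marking closure, while <false> only counts the object's
     // size (used for typeArrays, which contain no references to follow).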
3190 template void CMTask::process_grey_object<true>(oop);
3191 template void CMTask::process_grey_object<false>(oop);
3192 
3193 // Closure for iteration over bitmaps
3194 class CMBitMapClosure : public BitMapClosure {
3195 private:
3196   // the bitmap that is being iterated over
3197   CMBitMap*                   _nextMarkBitMap;
3198   ConcurrentMark*             _cm;
3199   CMTask*                     _task;
3200 
3201 public:
3202   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3203     _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { }
3204 
3205   bool do_bit(size_t offset) {
3206     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3207     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3208     assert(addr < _cm->finger(), "invariant");
3209 
3210     statsOnly( _task->increase_objs_found_on_bitmap() );
3211     assert(addr >= _task->finger(), "invariant");
3212 
3213     // We move that task's local finger along.
3214     _task->move_finger_to(addr);
3215 
3216     _task->scan_object(oop(addr));
3217     // we only partially drain the local queue and global stack
3218     _task->drain_local_queue(true);
3219     _task->drain_global_stack(true);
3220 
3221     // if the has_aborted flag has been raised, we need to bail out of
3222     // the iteration
3223     return !_task->has_aborted();
3224   }
3225 };
3226 
3227 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3228                                ConcurrentMark* cm,
3229                                CMTask* task)
3230   : _g1h(g1h), _cm(cm), _task(task) {
3231   assert(_ref_processor == NULL, "should be initialized to NULL");
3232 
3233   if (G1UseConcMarkReferenceProcessing) {
3234     _ref_processor = g1h->ref_processor_cm();
3235     assert(_ref_processor != NULL, "should not be NULL");
3236   }
3237 }
3238 
3239 void CMTask::setup_for_region(HeapRegion* hr) {
3240   assert(hr != NULL,
3241         "claim_region() should have filtered out NULL regions");
3242   assert(!hr->is_continues_humongous(),
3243         "claim_region() should have filtered out continues humongous regions");
3244 
3245   if (_cm->verbose_low()) {
3246     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3247                            _worker_id, p2i(hr));
3248   }
3249 
3250   _curr_region  = hr;
3251   _finger       = hr->bottom();
3252   update_region_limit();
3253 }
3254 
3255 void CMTask::update_region_limit() {
3256   HeapRegion* hr            = _curr_region;
3257   HeapWord* bottom          = hr->bottom();
3258   HeapWord* limit           = hr->next_top_at_mark_start();
3259 
3260   if (limit == bottom) {
3261     if (_cm->verbose_low()) {
3262       gclog_or_tty->print_cr("[%u] found an empty region "
3263                              "["PTR_FORMAT", "PTR_FORMAT")",
3264                              _worker_id, p2i(bottom), p2i(limit));
3265     }
3266     // The region was collected underneath our feet.
3267     // We set the finger to bottom to ensure that the bitmap
3268     // iteration that will follow this will not do anything.
3269     // (this is not a condition that holds when we set the region up,
3270     // as the region is not supposed to be empty in the first place)
3271     _finger = bottom;
3272   } else if (limit >= _region_limit) {
3273     assert(limit >= _finger, "peace of mind");
3274   } else {
3275     assert(limit < _region_limit, "only way to get here");
3276     // This can happen under some pretty unusual circumstances. An
3277     // evacuation pause empties the region underneath our feet (NTAMS
3278     // at bottom). We then do some allocation in the region (NTAMS
3279     // stays at bottom), followed by the region being used as a GC
3280     // alloc region (NTAMS will move to top() and the objects
3281     // originally below it will be grayed). All objects now marked in
3282     // the region are explicitly grayed, if below the global finger,
3283     // so in fact we do not need to scan anything else. We simply
3284     // set _finger to be limit to ensure that the bitmap iteration
3285     // doesn't do anything.
3286     _finger = limit;
3287   }
3288 
3289   _region_limit = limit;
3290 }
3291 
3292 void CMTask::giveup_current_region() {
3293   assert(_curr_region != NULL, "invariant");
3294   if (_cm->verbose_low()) {
3295     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3296                            _worker_id, p2i(_curr_region));
3297   }
3298   clear_region_fields();
3299 }
3300 
3301 void CMTask::clear_region_fields() {
3302   // Values for these three fields that indicate that we're not
3303   // holding on to a region.
3304   _curr_region   = NULL;
3305   _finger        = NULL;
3306   _region_limit  = NULL;
3307 }
3308 
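     // Installs or clears this task's marking oop closure. The asserts below
     // enforce strict alternation: a non-NULL closure may only replace NULL
     // and vice versa (do_marking_step() installs one at the start of each
     // step).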
3309 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3310   if (cm_oop_closure == NULL) {
3311     assert(_cm_oop_closure != NULL, "invariant");
3312   } else {
3313     assert(_cm_oop_closure == NULL, "invariant");
3314   }
3315   _cm_oop_closure = cm_oop_closure;
3316 }
3317 
3318 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3319   guarantee(nextMarkBitMap != NULL, "invariant");
3320 
3321   if (_cm->verbose_low()) {
3322     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3323   }
3324 
3325   _nextMarkBitMap                = nextMarkBitMap;
3326   clear_region_fields();
3327 
3328   _calls                         = 0;
3329   _elapsed_time_ms               = 0.0;
3330   _termination_time_ms           = 0.0;
3331   _termination_start_time_ms     = 0.0;
3332 
3333 #if _MARKING_STATS_
3334   _aborted                       = 0;
3335   _aborted_overflow              = 0;
3336   _aborted_cm_aborted            = 0;
3337   _aborted_yield                 = 0;
3338   _aborted_timed_out             = 0;
3339   _aborted_satb                  = 0;
3340   _aborted_termination           = 0;
3341   _steal_attempts                = 0;
3342   _steals                        = 0;
3343   _local_pushes                  = 0;
3344   _local_pops                    = 0;
3345   _local_max_size                = 0;
3346   _objs_scanned                  = 0;
3347   _global_pushes                 = 0;
3348   _global_pops                   = 0;
3349   _global_max_size               = 0;
3350   _global_transfers_to           = 0;
3351   _global_transfers_from         = 0;
3352   _regions_claimed               = 0;
3353   _objs_found_on_bitmap          = 0;
3354   _satb_buffers_processed        = 0;
3355 #endif // _MARKING_STATS_
3356 }
3357 
3358 bool CMTask::should_exit_termination() {
3359   regular_clock_call();
3360   // This is called when we are in the termination protocol. We should
3361   // quit if, for some reason, this task wants to abort or the global
3362   // stack is not empty (this means that we can get work from it).
3363   return !_cm->mark_stack_empty() || has_aborted();
3364 }
3365 
3366 void CMTask::reached_limit() {
3367   assert(_words_scanned >= _words_scanned_limit ||
3368          _refs_reached >= _refs_reached_limit,
3369          "shouldn't have been called otherwise");
3370   regular_clock_call();
3371 }
3372 
3373 void CMTask::regular_clock_call() {
3374   if (has_aborted()) return;
3375 
3376   // First, we need to recalculate the words scanned and refs reached
3377   // limits for the next clock call.
3378   recalculate_limits();
3379 
3380   // During the regular clock call we do the following:
3381 
3382   // (1) If an overflow has been flagged, then we abort.
3383   if (_cm->has_overflown()) {
3384     set_has_aborted();
3385     return;
3386   }
3387 
3388   // If we are not concurrent (i.e. we're doing remark) we don't need
3389   // to check anything else. The other steps are only needed during
3390   // the concurrent marking phase.
3391   if (!concurrent()) return;
3392 
3393   // (2) If marking has been aborted for a Full GC, then we also abort.
3394   if (_cm->has_aborted()) {
3395     set_has_aborted();
3396     statsOnly( ++_aborted_cm_aborted );
3397     return;
3398   }
3399 
3400   double curr_time_ms = os::elapsedVTime() * 1000.0;
3401 
3402   // (3) If marking stats are enabled, then we update the step history.
3403 #if _MARKING_STATS_
3404   if (_words_scanned >= _words_scanned_limit) {
3405     ++_clock_due_to_scanning;
3406   }
3407   if (_refs_reached >= _refs_reached_limit) {
3408     ++_clock_due_to_marking;
3409   }
3410 
3411   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3412   _interval_start_time_ms = curr_time_ms;
3413   _all_clock_intervals_ms.add(last_interval_ms);
3414 
3415   if (_cm->verbose_medium()) {
3416     gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3417                            "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3418                            _worker_id, last_interval_ms,
3419                            _words_scanned,
3420                            (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3421                            _refs_reached,
3422                            (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3423   }
3424 #endif // _MARKING_STATS_
3425 
3426   // (4) We check whether we should yield. If we have to, then we abort.
3427   if (SuspendibleThreadSet::should_yield()) {
3428     // We should yield. To do this we abort the task. The caller is
3429     // responsible for yielding.
3430     set_has_aborted();
3431     statsOnly( ++_aborted_yield );
3432     return;
3433   }
3434 
3435   // (5) We check whether we've reached our time quota. If we have,
3436   // then we abort.
3437   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3438   if (elapsed_time_ms > _time_target_ms) {
3439     set_has_aborted();
3440     _has_timed_out = true;
3441     statsOnly( ++_aborted_timed_out );
3442     return;
3443   }
3444 
3445   // (6) Finally, we check whether there are enough completed SATB
3446   // buffers available for processing. If there are, we abort.
3447   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3448   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3449     if (_cm->verbose_low()) {
3450       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3451                              _worker_id);
3452     }
3453     // we do need to process SATB buffers, we'll abort and restart
3454     // the marking task to do so
3455     set_has_aborted();
3456     statsOnly( ++_aborted_satb );
3457     return;
3458   }
3459 }
3460 
3461 void CMTask::recalculate_limits() {
3462   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3463   _words_scanned_limit      = _real_words_scanned_limit;
3464 
3465   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
3466   _refs_reached_limit       = _real_refs_reached_limit;
3467 }
3468 
3469 void CMTask::decrease_limits() {
3470   // This is called when we believe that we're going to do an infrequent
3471   // operation which will increase the per-byte scanning cost (i.e. move
3472   // entries to/from the global stack). It basically tries to decrease the
3473   // scanning limit so that the clock is called earlier.
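       // Concretely: _real_words_scanned_limit was set one full
       // words_scanned_period ahead of the scan count at the last
       // recalculate_limits() call, so subtracting 3/4 of the period leaves
       // at most a quarter of the period's work before the clock fires.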
3474 
3475   if (_cm->verbose_medium()) {
3476     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3477   }
3478 
3479   _words_scanned_limit = _real_words_scanned_limit -
3480     3 * words_scanned_period / 4;
3481   _refs_reached_limit  = _real_refs_reached_limit -
3482     3 * refs_reached_period / 4;
3483 }
3484 
3485 void CMTask::move_entries_to_global_stack() {
3486   // local array where we'll store the entries that will be popped
3487   // from the local queue
3488   oop buffer[global_stack_transfer_size];
3489 
3490   int n = 0;
3491   oop obj;
3492   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3493     buffer[n] = obj;
3494     ++n;
3495   }
3496 
3497   if (n > 0) {
3498     // we popped at least one entry from the local queue
3499 
3500     statsOnly( ++_global_transfers_to; _local_pops += n );
3501 
3502     if (!_cm->mark_stack_push(buffer, n)) {
3503       if (_cm->verbose_low()) {
3504         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3505                                _worker_id);
3506       }
3507       set_has_aborted();
3508     } else {
3509       // the transfer was successful
3510 
3511       if (_cm->verbose_medium()) {
3512         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3513                                _worker_id, n);
3514       }
3515       statsOnly( size_t tmp_size = _cm->mark_stack_size();
3516                  if (tmp_size > _global_max_size) {
3517                    _global_max_size = tmp_size;
3518                  }
3519                  _global_pushes += n );
3520     }
3521   }
3522 
3523   // this operation was quite expensive, so decrease the limits
3524   decrease_limits();
3525 }
3526 
3527 void CMTask::get_entries_from_global_stack() {
3528   // local array where we'll store the entries that will be popped
3529   // from the global stack.
3530   oop buffer[global_stack_transfer_size];
3531   int n;
3532   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3533   assert(n <= global_stack_transfer_size,
3534          "we should not pop more than the given limit");
3535   if (n > 0) {
3536     // yes, we did actually pop at least one entry
3537 
3538     statsOnly( ++_global_transfers_from; _global_pops += n );
3539     if (_cm->verbose_medium()) {
3540       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3541                              _worker_id, n);
3542     }
3543     for (int i = 0; i < n; ++i) {
3544       bool success = _task_queue->push(buffer[i]);
3545       // We only call this when the local queue is empty or under a
3546       // given target limit. So, we do not expect this push to fail.
3547       assert(success, "invariant");
3548     }
3549 
3550     statsOnly( size_t tmp_size = (size_t)_task_queue->size();
3551                if (tmp_size > _local_max_size) {
3552                  _local_max_size = tmp_size;
3553                }
3554                _local_pushes += n );
3555   }
3556 
3557   // this operation was quite expensive, so decrease the limits
3558   decrease_limits();
3559 }
3560 
3561 void CMTask::drain_local_queue(bool partially) {
3562   if (has_aborted()) return;
3563 
3564   // Decide what the target size is, depending whether we're going to
3565   // drain it partially (so that other tasks can steal if they run out
3566   // of things to do) or totally (at the very end).
3567   size_t target_size;
3568   if (partially) {
3569     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3570   } else {
3571     target_size = 0;
3572   }
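
       // Illustrative numbers (not normative): with a task queue capacity
       // of, say, 16K entries, max_elems()/3 is about 5461; whenever
       // GCDrainStackTargetSize is smaller than that, it wins the MIN2 and
       // a partial drain stops once about that many entries remain, leaving
       // work available for other tasks to steal.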
3573 
3574   if (_task_queue->size() > target_size) {
3575     if (_cm->verbose_high()) {
3576       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3577                              _worker_id, target_size);
3578     }
3579 
3580     oop obj;
3581     bool ret = _task_queue->pop_local(obj);
3582     while (ret) {
3583       statsOnly( ++_local_pops );
3584 
3585       if (_cm->verbose_high()) {
3586         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3587                                p2i((void*) obj));
3588       }
3589 
3590       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3591       assert(!_g1h->is_on_master_free_list(
3592                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3593 
3594       scan_object(obj);
3595 
3596       if (_task_queue->size() <= target_size || has_aborted()) {
3597         ret = false;
3598       } else {
3599         ret = _task_queue->pop_local(obj);
3600       }
3601     }
3602 
3603     if (_cm->verbose_high()) {
3604       gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3605                              _worker_id, _task_queue->size());
3606     }
3607   }
3608 }
3609 
3610 void CMTask::drain_global_stack(bool partially) {
3611   if (has_aborted()) return;
3612 
3613   // We have a policy to drain the local queue before we attempt to
3614   // drain the global stack.
3615   assert(partially || _task_queue->size() == 0, "invariant");
3616 
3617   // Decide what the target size is, depending whether we're going to
3618   // drain it partially (so that other tasks can steal if they run out
3619   // of things to do) or totally (at the very end).  Notice that,
3620   // because we move entries from the global stack in chunks or
3621   // because another task might be doing the same, we might in fact
3622   // drop below the target. But this is not a problem.
3623   size_t target_size;
3624   if (partially) {
3625     target_size = _cm->partial_mark_stack_size_target();
3626   } else {
3627     target_size = 0;
3628   }
3629 
3630   if (_cm->mark_stack_size() > target_size) {
3631     if (_cm->verbose_low()) {
3632       gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3633                              _worker_id, target_size);
3634     }
3635 
3636     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3637       get_entries_from_global_stack();
3638       drain_local_queue(partially);
3639     }
3640 
3641     if (_cm->verbose_low()) {
3642       gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3643                              _worker_id, _cm->mark_stack_size());
3644     }
3645   }
3646 }
3647 
3648 // The SATB queue code makes several assumptions about whether to call
3649 // the par or non-par versions of its methods. This is why some of the
3650 // code is replicated. We should really get rid of the single-threaded
3651 // version of the code to simplify things.
3652 void CMTask::drain_satb_buffers() {
3653   if (has_aborted()) return;
3654 
3655   // We set this so that the regular clock knows that we're in the
3656   // middle of draining buffers and doesn't set the abort flag when it
3657   // notices that SATB buffers are available for draining. It'd be
3658   // very counterproductive if it did that. :-)
3659   _draining_satb_buffers = true;
3660 
3661   CMSATBBufferClosure satb_cl(this, _g1h);
3662   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3663 
3664   // This keeps claiming and applying the closure to completed buffers
3665   // until we run out of buffers or we need to abort.
3666   while (!has_aborted() &&
3667          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
3668     if (_cm->verbose_medium()) {
3669       gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3670     }
3671     statsOnly( ++_satb_buffers_processed );
3672     regular_clock_call();
3673   }
3674 
3675   _draining_satb_buffers = false;
3676 
3677   assert(has_aborted() ||
3678          concurrent() ||
3679          satb_mq_set.completed_buffers_num() == 0, "invariant");
3680 
3681   // again, this was a potentially expensive operation, decrease the
3682   // limits to get the regular clock call early
3683   decrease_limits();
3684 }
3685 
3686 void CMTask::print_stats() {
3687   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3688                          _worker_id, _calls);
3689   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3690                          _elapsed_time_ms, _termination_time_ms);
3691   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3692                          _step_times_ms.num(), _step_times_ms.avg(),
3693                          _step_times_ms.sd());
3694   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3695                          _step_times_ms.maximum(), _step_times_ms.sum());
3696 
3697 #if _MARKING_STATS_
3698   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3699                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3700                          _all_clock_intervals_ms.sd());
3701   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
3702                          _all_clock_intervals_ms.maximum(),
3703                          _all_clock_intervals_ms.sum());
3704   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT,
3705                          _clock_due_to_scanning, _clock_due_to_marking);
3706   gclog_or_tty->print_cr("  Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT,
3707                          _objs_scanned, _objs_found_on_bitmap);
3708   gclog_or_tty->print_cr("  Local Queue:  pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
3709                          _local_pushes, _local_pops, _local_max_size);
3710   gclog_or_tty->print_cr("  Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
3711                          _global_pushes, _global_pops, _global_max_size);
3712   gclog_or_tty->print_cr("                transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT,
3713                          _global_transfers_to, _global_transfers_from);
3714   gclog_or_tty->print_cr("  Regions: claimed = " SIZE_FORMAT, _regions_claimed);
3715   gclog_or_tty->print_cr("  SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed);
3716   gclog_or_tty->print_cr("  Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT,
3717                          _steal_attempts, _steals);
3718   gclog_or_tty->print_cr("  Aborted: " SIZE_FORMAT ", due to", _aborted);
3719   gclog_or_tty->print_cr("    overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT,
3720                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3721   gclog_or_tty->print_cr("    time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT,
3722                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3723 #endif // _MARKING_STATS_
3724 }
3725 
3726 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
3727   return _task_queues->steal(worker_id, hash_seed, obj);
3728 }
3729 
3730 /*****************************************************************************
3731 
3732     The do_marking_step(time_target_ms, ...) method is the building
3733     block of the parallel marking framework. It can be called in parallel
3734     with other invocations of do_marking_step() on different tasks
3735     (but only one per task, obviously) and concurrently with the
3736     mutator threads, or during remark, hence it eliminates the need
3737     for two versions of the code. When called during remark, it will
3738     pick up from where the task left off during the concurrent marking
3739     phase. Interestingly, tasks are also claimable during evacuation
3740     pauses, since do_marking_step() ensures that it aborts before
3741     it needs to yield.
3742 
3743     The data structures that it uses to do marking work are the
3744     following:
3745 
3746       (1) Marking Bitmap. If there are gray objects that appear only
3747       on the bitmap (this happens either when dealing with an overflow
3748       or when the initial marking phase has simply marked the roots
3749       and didn't push them on the stack), then tasks claim heap
3750       regions whose bitmap they then scan to find gray objects. A
3751       global finger indicates where the end of the last claimed region
3752       is. A local finger indicates how far into the region a task has
3753       scanned. The two fingers are used to determine how to gray an
3754       object (i.e. whether simply marking it is OK, as it will be
3755       visited by a task in the future, or whether it also needs to be
3756       pushed on a stack).
3757 
3758       (2) Local Queue. The task's own queue, which it can access
3759       reasonably efficiently. Other tasks can steal from
3760       it when they run out of work. Throughout the marking phase, a
3761       task attempts to keep its local queue short but not totally
3762       empty, so that entries are available for stealing by other
3763       tasks. Only when there is no more work will a task totally
3764       drain its local queue.
3765 
3766       (3) Global Mark Stack. This handles local queue overflow. During
3767       marking only sets of entries are moved between it and the local
3768       queues, as access to it requires a mutex, and more fine-grained
3769       interaction with it might cause contention. If it
3770       overflows, then the marking phase should restart and iterate
3771       over the bitmap to identify gray objects. Throughout the marking
3772       phase, tasks attempt to keep the global mark stack at a small
3773       length but not totally empty, so that entries are available for
3774       popping by other tasks. Only when there is no more work will
3775       tasks totally drain the global mark stack.
3776 
3777       (4) SATB Buffer Queue. This is where completed SATB buffers are
3778       made available. Buffers are regularly removed from this queue
3779       and scanned for roots, so that the queue doesn't get too
3780       long. During remark, all completed buffers are processed, as
3781       well as the filled-in parts of any uncompleted buffers.
3782 
3783     The do_marking_step() method tries to abort when the time target
3784     has been reached. There are a few other cases when the
3785     do_marking_step() method also aborts:
3786 
3787       (1) When the marking phase has been aborted (after a Full GC).
3788 
3789       (2) When a global overflow (on the global stack) has been
3790       triggered. Before the task aborts, it will actually sync up with
3791       the other tasks to ensure that all the marking data structures
3792       (local queues, stacks, fingers, etc.) are re-initialized so that
3793       when do_marking_step() completes, the marking phase can
3794       immediately restart.
3795 
3796       (3) When enough completed SATB buffers are available. The
3797       do_marking_step() method only tries to drain SATB buffers right
3798       at the beginning. So, if enough buffers are available, the
3799       marking step aborts and the SATB buffers are processed at
3800       the beginning of the next invocation.
3801 
3802       (4) To yield. When we have to yield, we abort and yield
3803       right at the end of do_marking_step(). This saves us from a lot
3804       of hassle since, by yielding, we might allow a Full GC. If this
3805       happens then objects will be compacted underneath our feet, the
3806       heap might shrink, etc. We avoid checking for all this by just
3807       aborting and doing the yield right at the end.
3808 
3809     From the above it follows that the do_marking_step() method should
3810     be called in a loop (or, otherwise, regularly) until it completes.
3811 
3812     If a marking step completes without its has_aborted() flag being
3813     true, it means it has completed the current marking phase (and
3814     also all other marking tasks have done so and have all synced up).
3815 
3816     A method called regular_clock_call() is invoked "regularly" (in
3817     sub-ms intervals) throughout marking. It is this clock method that
3818     checks all the abort conditions which were mentioned above and
3819     decides when the task should abort. A work-based scheme is used to
3820     trigger this clock method: when the number of object words the
3821     marking phase has scanned or the number of references the marking
3822       phase has visited reach a given limit. Additional invocations of
3823       the clock method have been planted in a few other strategic places
3824     too. The initial reason for the clock method was to avoid calling
3825     vtime too regularly, as it is quite expensive. So, once it was in
3826     place, it was natural to piggy-back all the other conditions on it
3827     too and not constantly check them throughout the code.
3828 
3829     If do_termination is true then do_marking_step will enter its
3830     termination protocol.
3831 
3832     The value of is_serial must be true when do_marking_step is being
3833     called serially (i.e. by the VMThread) and do_marking_step should
3834     skip any synchronization in the termination and overflow code.
3835     Examples include the serial remark code and the serial reference
3836     processing closures.
3837 
3838     The value of is_serial must be false when do_marking_step is
3839     being called by any of the worker threads in a work gang.
3840     Examples include the concurrent marking code (CMMarkingTask),
3841     the MT remark code, and the MT reference processing closures.
3842 
3843  *****************************************************************************/
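
     // Illustrative driver sketch (not part of the original file): per the
     // contract above, a marking worker keeps re-invoking the step until it
     // completes without aborting, yielding in between as needed, roughly:
     //
     //   do {
     //     task->do_marking_step(target_ms,
     //                           true  /* do_termination */,
     //                           false /* is_serial */);
     //     // if we aborted in order to yield, yield here, then resume
     //   } while (task->has_aborted() && !cm->has_aborted());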
3844 
3845 void CMTask::do_marking_step(double time_target_ms,
3846                              bool do_termination,
3847                              bool is_serial) {
3848   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
3849   assert(concurrent() == _cm->concurrent(), "they should be the same");
3850 
3851   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
3852   assert(_task_queues != NULL, "invariant");
3853   assert(_task_queue != NULL, "invariant");
3854   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
3855 
3856   assert(!_claimed,
3857          "only one thread should claim this task at any one time");
3858 
3859   // OK, this doesn't safeguard against all possible scenarios, as it is
3860   // possible for two threads to set the _claimed flag at the same
3861   // time. But it is only for debugging purposes anyway and it will
3862   // catch most problems.
3863   _claimed = true;
3864 
3865   _start_time_ms = os::elapsedVTime() * 1000.0;
3866   statsOnly( _interval_start_time_ms = _start_time_ms );
3867 
3868   // If do_stealing is true then do_marking_step will attempt to
3869   // steal work from the other CMTasks. It only makes sense to
3870   // enable stealing when the termination protocol is enabled
3871   // and do_marking_step() is not being called serially.
3872   bool do_stealing = do_termination && !is_serial;
3873 
3874   double diff_prediction_ms =
3875     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
3876   _time_target_ms = time_target_ms - diff_prediction_ms;
3877 
3878   // set up the variables that are used in the work-based scheme to
3879   // call the regular clock method
3880   _words_scanned = 0;
3881   _refs_reached  = 0;
3882   recalculate_limits();
3883 
3884   // clear all flags
3885   clear_has_aborted();
3886   _has_timed_out = false;
3887   _draining_satb_buffers = false;
3888 
3889   ++_calls;
3890 
3891   if (_cm->verbose_low()) {
3892     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
3893                            "target = %1.2lfms >>>>>>>>>>",
3894                            _worker_id, _calls, _time_target_ms);
3895   }
3896 
3897   // Set up the bitmap and oop closures. Anything that uses them is
3898   // eventually called from this method, so it is OK to allocate these
3899   // on the stack.
3900   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
3901   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
3902   set_cm_oop_closure(&cm_oop_closure);
3903 
3904   if (_cm->has_overflown()) {
3905     // This can happen if the mark stack overflows during a GC pause
3906     // and this task, after a yield point, restarts. We have to abort
3907     // as we need to get into the overflow protocol which happens
3908     // right at the end of this task.
3909     set_has_aborted();
3910   }
3911 
3912   // First drain any available SATB buffers. After this, we will not
3913   // look at SATB buffers before the next invocation of this method.
3914   // If enough completed SATB buffers are queued up, the regular clock
3915   // will abort this task so that it restarts.
3916   drain_satb_buffers();
3917   // ...then partially drain the local queue and the global stack
3918   drain_local_queue(true);
3919   drain_global_stack(true);
3920 
3921   do {
3922     if (!has_aborted() && _curr_region != NULL) {
3923       // This means that we're already holding on to a region.
3924       assert(_finger != NULL, "if region is not NULL, then the finger "
3925              "should not be NULL either");
3926 
3927       // We might have restarted this task after an evacuation pause
3928       // which might have evacuated the region we're holding on to
3929       // underneath our feet. Let's read its limit again to make sure
3930       // that we do not iterate over a region of the heap that
3931       // contains garbage (update_region_limit() will also move
3932       // _finger to the start of the region if it is found empty).
3933       update_region_limit();
3934       // We will start from _finger not from the start of the region,
3935       // as we might be restarting this task after aborting half-way
3936       // through scanning this region. In this case, _finger points to
3937       // the address where we last found a marked object. If this is a
3938       // fresh region, _finger points to start().
3939       MemRegion mr = MemRegion(_finger, _region_limit);
3940 
3941       if (_cm->verbose_low()) {
3942         gclog_or_tty->print_cr("[%u] we're scanning part "
3943                                "["PTR_FORMAT", "PTR_FORMAT") "
3944                                "of region "HR_FORMAT,
3945                                _worker_id, p2i(_finger), p2i(_region_limit),
3946                                HR_FORMAT_PARAMS(_curr_region));
3947       }
3948 
3949       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
3950              "humongous regions should go around loop once only");
3951 
3952       // Some special cases:
3953       // If the memory region is empty, we can just give up the region.
3954       // If the current region is humongous then we only need to check
3955       // the bitmap for the bit associated with the start of the object,
3956       // scan the object if it's live, and give up the region.
3957       // Otherwise, let's iterate over the bitmap of the part of the region
3958       // that is left.
3959       // If the iteration is successful, give up the region.
3960       if (mr.is_empty()) {
3961         giveup_current_region();
3962         regular_clock_call();
3963       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
3964         if (_nextMarkBitMap->isMarked(mr.start())) {
3965           // The object is marked - apply the closure
3966           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
3967           bitmap_closure.do_bit(offset);
3968         }
3969         // Even if this task aborted while scanning the humongous object
3970         // we can (and should) give up the current region.
3971         giveup_current_region();
3972         regular_clock_call();
3973       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
3974         giveup_current_region();
3975         regular_clock_call();
3976       } else {
3977         assert(has_aborted(), "currently the only way to do so");
3978         // The only way to abort the bitmap iteration is to return
3979         // false from the do_bit() method. However, inside the
3980         // do_bit() method we move the _finger to point to the
3981         // object currently being looked at. So, if we bail out, we
3982         // have definitely set _finger to something non-null.
3983         assert(_finger != NULL, "invariant");
3984 
3985         // Region iteration was actually aborted. So now _finger
3986         // points to the address of the object we last scanned. If we
3987         // leave it there, when we restart this task, we will rescan
3988         // the object. It is easy to avoid this. We move the finger by
3989         // enough to point to the next possible object header (the
3990         // bitmap knows by how much we need to move it as it knows its
3991         // granularity).
3992         assert(_finger < _region_limit, "invariant");
3993         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
3994         // Check if bitmap iteration was aborted while scanning the last object
3995         if (new_finger >= _region_limit) {
3996           giveup_current_region();
3997         } else {
3998           move_finger_to(new_finger);
3999         }
4000       }
4001     }
4002     // At this point we have either completed iterating over the
4003     // region we were holding on to, or we have aborted.
4004 
4005     // We then partially drain the local queue and the global stack.
4006     // (Do we really need this?)
4007     drain_local_queue(true);
4008     drain_global_stack(true);
4009 
4010     // Read the note on the claim_region() method on why it might
4011     // return NULL with potentially more regions available for
4012     // claiming and why we have to check out_of_regions() to determine
4013     // whether we're done or not.
4014     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4015       // We are going to try to claim a new region. We should have
4016       // given up on the previous one.
4017       // Separated the asserts so that we know which one fires.
4018       assert(_curr_region  == NULL, "invariant");
4019       assert(_finger       == NULL, "invariant");
4020       assert(_region_limit == NULL, "invariant");
4021       if (_cm->verbose_low()) {
4022         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4023       }
4024       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4025       if (claimed_region != NULL) {
4026         // Yes, we managed to claim one
4027         statsOnly( ++_regions_claimed );
4028 
4029         if (_cm->verbose_low()) {
4030           gclog_or_tty->print_cr("[%u] we successfully claimed "
4031                                  "region "PTR_FORMAT,
4032                                  _worker_id, p2i(claimed_region));
4033         }
4034 
4035         setup_for_region(claimed_region);
4036         assert(_curr_region == claimed_region, "invariant");
4037       }
4038       // It is important to call the regular clock here. It might take
4039       // a while to claim a region if, for example, we hit a large
4040       // block of empty regions. So we need to call the regular clock
4041       // method once round the loop to make sure it's called
4042       // frequently enough.
4043       regular_clock_call();
4044     }
4045 
4046     if (!has_aborted() && _curr_region == NULL) {
4047       assert(_cm->out_of_regions(),
4048              "at this point we should be out of regions");
4049     }
4050   } while (_curr_region != NULL && !has_aborted());
4051 
4052   if (!has_aborted()) {
4053     // We cannot check whether the global stack is empty, since other
4054     // tasks might be pushing objects to it concurrently.
4055     assert(_cm->out_of_regions(),
4056            "at this point we should be out of regions");
4057 
4058     if (_cm->verbose_low()) {
4059       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4060     }
4061 
4062     // Try to reduce the number of available SATB buffers so that
4063     // remark has less work to do.
4064     drain_satb_buffers();
4065   }
4066 
4067   // Since we've done everything else, we can now totally drain the
4068   // local queue and global stack.
4069   drain_local_queue(false);
4070   drain_global_stack(false);
4071 
4072   // Attempt to steal work from other tasks' queues.
4073   if (do_stealing && !has_aborted()) {
4074     // We have not aborted. This means that we have finished all that
4075     // we could. Let's try to do some stealing...
4076 
4077     // We cannot check whether the global stack is empty, since other
4078     // tasks might be pushing objects to it concurrently.
4079     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4080            "only way to reach here");
4081 
4082     if (_cm->verbose_low()) {
4083       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4084     }
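         // try_stealing() picks victim queues (pseudo-)randomly, seeded by
         // _hash_seed, and tries to pop an entry from one of them; we keep
         // stealing until every attempt fails or we are asked to abort.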
4085 
4086     while (!has_aborted()) {
4087       oop obj;
4088       statsOnly( ++_steal_attempts );
4089 
4090       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4091         if (_cm->verbose_medium()) {
4092           gclog_or_tty->print_cr("[%u] stolen " PTR_FORMAT " successfully",
4093                                  _worker_id, p2i((void*) obj));
4094         }
4095 
4096         statsOnly( ++_steals );
4097 
4098         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4099                "any stolen object should be marked");
4100         scan_object(obj);
4101 
4102         // And since we're towards the end, let's totally drain the
4103         // local queue and global stack.
4104         drain_local_queue(false);
4105         drain_global_stack(false);
4106       } else {
4107         break;
4108       }
4109     }
4110   }
4111 
4112   // If we are about to wrap up and go into termination, check if we
4113   // should raise the overflow flag.
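       // (force_overflow() is presumably a testing hook that deliberately
       // raises the overflow flag in order to exercise this restart path.)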
4114   if (do_termination && !has_aborted()) {
4115     if (_cm->force_overflow()->should_force()) {
4116       _cm->set_has_overflown();
4117       regular_clock_call();
4118     }
4119   }
4120 
4121   // We still haven't aborted. Now, let's try to get into the
4122   // termination protocol.
4123   if (do_termination && !has_aborted()) {
4124     // We cannot check whether the global stack is empty, since other
4125     // tasks might be concurrently pushing objects on it.
4126     // Separated the asserts so that we know which one fires.
4127     assert(_cm->out_of_regions(), "only way to reach here");
4128     assert(_task_queue->size() == 0, "only way to reach here");
4129 
4130     if (_cm->verbose_low()) {
4131       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4132     }
4133 
4134     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4135 
4136     // The CMTask class also extends the TerminatorTerminator class,
4137     // so its should_exit_termination() method also gets a say in
4138     // whether to exit the termination protocol.
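         // (In the serial case there is only one task, so it can finish
         // without offering termination to anyone.)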
4139     bool finished = (is_serial ||
4140                      _cm->terminator()->offer_termination(this));
4141     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4142     _termination_time_ms +=
4143       termination_end_time_ms - _termination_start_time_ms;
4144 
4145     if (finished) {
4146       // We're all done.
4147 
4148       if (_worker_id == 0) {
4149         // let's allow task 0 to do this
4150         if (concurrent()) {
4151           assert(_cm->concurrent_marking_in_progress(), "invariant");
4152           // we need to set this to false before the next
4153           // safepoint. This way we ensure that the marking phase
4154           // doesn't observe any more heap expansions.
4155           _cm->clear_concurrent_marking_in_progress();
4156         }
4157       }
4158 
4159       // We can now guarantee that the global stack is empty, since
4160       // all other tasks have finished. We separated the guarantees so
4161       // that, if a condition is false, we can immediately find out
4162       // which one.
4163       guarantee(_cm->out_of_regions(), "only way to reach here");
4164       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4165       guarantee(_task_queue->size() == 0, "only way to reach here");
4166       guarantee(!_cm->has_overflown(), "only way to reach here");
4167       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4168 
4169       if (_cm->verbose_low()) {
4170         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4171       }
4172     } else {
4173       // Apparently there's more work to do. Let's abort this task; it
4174       // will be restarted and we can hopefully find more things to do.
4175 
4176       if (_cm->verbose_low()) {
4177         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4178                                _worker_id);
4179       }
4180 
4181       set_has_aborted();
4182       statsOnly( ++_aborted_termination );
4183     }
4184   }
4185 
4186   // Mainly for debugging purposes: make sure that a pointer to the
4187   // closure which was stack-allocated in this frame doesn't
4188   // escape it by accident.
4189   set_cm_oop_closure(NULL);
4190   double end_time_ms = os::elapsedVTime() * 1000.0;
4191   double elapsed_time_ms = end_time_ms - _start_time_ms;
4192   // Update the step history.
4193   _step_times_ms.add(elapsed_time_ms);
4194 
4195   if (has_aborted()) {
4196     // The task was aborted for some reason.
4197 
4198     statsOnly( ++_aborted );
4199 
4200     if (_has_timed_out) {
4201       double diff_ms = elapsed_time_ms - _time_target_ms;
4202       // Keep statistics of how well we did with respect to hitting
4203       // our target only if we actually timed out (if we aborted for
4204       // other reasons, then the results might get skewed).
4205       _marking_step_diffs_ms.add(diff_ms);
4206     }
4207 
4208     if (_cm->has_overflown()) {
4209       // This is the interesting one. We aborted because a global
4210       // overflow was raised. This means we have to restart the
4211       // marking phase and start iterating over regions. However, in
4212       // order to do this we have to make sure that all tasks stop
4213       // what they are doing and re-initialize in a safe manner. We
4214       // will achieve this with the use of two barrier sync points.
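       // (The first barrier guarantees that every task has stopped
       // doing marking work before any of them re-initializes its
       // local data structures; the second guarantees that no task
       // resumes before all of them have finished re-initializing.)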
4215 
4216       if (_cm->verbose_low()) {
4217         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4218       }
4219 
4220       if (!is_serial) {
4221         // We only need to enter the sync barrier if being called
4222         // from a parallel context
4223         _cm->enter_first_sync_barrier(_worker_id);
4224 
4225         // When we exit this sync barrier we know that all tasks have
4226         // stopped doing marking work. So, it's now safe to
4227         // re-initialize our data structures. At the end of this method,
4228         // task 0 will clear the global data structures.
4229       }
4230 
4231       statsOnly( ++_aborted_overflow );
4232 
4233       // We clear the local state of this task...
4234       clear_region_fields();
4235 
4236       if (!is_serial) {
4237         // ...and enter the second barrier.
4238         _cm->enter_second_sync_barrier(_worker_id);
4239       }
4240       // At this point, if we're in the concurrent phase of
4241       // marking, everything has been re-initialized and we're
4242       // ready to restart.
4243     }
4244 
4245     if (_cm->verbose_low()) {
4246       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4247                              "elapsed = %1.2lfms <<<<<<<<<<",
4248                              _worker_id, _time_target_ms, elapsed_time_ms);
4249       if (_cm->has_aborted()) {
4250         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4251                                _worker_id);
4252       }
4253     }
4254   } else {
4255     if (_cm->verbose_low()) {
4256       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4257                              "elapsed = %1.2lfms <<<<<<<<<<",
4258                              _worker_id, _time_target_ms, elapsed_time_ms);
4259     }
4260   }
4261 
4262   _claimed = false;
4263 }
4264 
4265 CMTask::CMTask(uint worker_id,
4266                ConcurrentMark* cm,
4267                size_t* marked_bytes,
4268                BitMap* card_bm,
4269                CMTaskQueue* task_queue,
4270                CMTaskQueueSet* task_queues)
4271   : _g1h(G1CollectedHeap::heap()),
4272     _worker_id(worker_id), _cm(cm),
4273     _claimed(false),
4274     _nextMarkBitMap(NULL), _hash_seed(17),
4275     _task_queue(task_queue),
4276     _task_queues(task_queues),
4277     _cm_oop_closure(NULL),
4278     _marked_bytes_array(marked_bytes),
4279     _card_bm(card_bm) {
4280   guarantee(task_queue != NULL, "invariant");
4281   guarantee(task_queues != NULL, "invariant");
4282 
4283   statsOnly( _clock_due_to_scanning = 0;
4284              _clock_due_to_marking  = 0 );
4285 
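       // Seed the step-time diff statistics so that the very first
       // prediction is based on a non-empty history; 0.5 ms is
       // presumably just a small, slightly conservative starting value.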
4286   _marking_step_diffs_ms.add(0.5);
4287 }
4288 
4289 // These are formatting macros that are used below to ensure
4290 // consistent formatting. The *_H_* versions are used to format the
4291 // header for a particular value and they should be kept consistent
4292 // with the corresponding macro. Also note that most of the macros add
4293 // the necessary whitespace (as a prefix), which makes them a bit
4294 // easier to compose.
4295 
4296 // All the output lines are prefixed with this string to be able to
4297 // identify them easily in a large log file.
4298 #define G1PPRL_LINE_PREFIX            "###"
4299 
4300 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
4301 #ifdef _LP64
4302 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
4303 #else // _LP64
4304 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
4305 #endif // _LP64
4306 
4307 // For per-region info
4308 #define G1PPRL_TYPE_FORMAT            "   %-4s"
4309 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
4310 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
4311 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
4312 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
4313 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
4314 
4315 // For summary info
4316 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
4317 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
4318 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
4319 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
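
     // For illustration, a per-region line built from the formats above
     // comes out roughly like this (the values here are made up):
     //
     // ###   OLD  0x00000000f0000000-0x00000000f0100000    1048576     917504     524288            12.3       4096          0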
4320 
4321 G1PrintRegionLivenessInfoClosure::
4322 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4323   : _out(out),
4324     _total_used_bytes(0), _total_capacity_bytes(0),
4325     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4326     _hum_used_bytes(0), _hum_capacity_bytes(0),
4327     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4328     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4329   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4330   MemRegion g1_reserved = g1h->g1_reserved();
4331   double now = os::elapsedTime();
4332 
4333   // Print the header of the output.
4334   _out->cr();
4335   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4336   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4337                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4338                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4339                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4340                  HeapRegion::GrainBytes);
4341   _out->print_cr(G1PPRL_LINE_PREFIX);
4342   _out->print_cr(G1PPRL_LINE_PREFIX
4343                  G1PPRL_TYPE_H_FORMAT
4344                  G1PPRL_ADDR_BASE_H_FORMAT
4345                  G1PPRL_BYTE_H_FORMAT
4346                  G1PPRL_BYTE_H_FORMAT
4347                  G1PPRL_BYTE_H_FORMAT
4348                  G1PPRL_DOUBLE_H_FORMAT
4349                  G1PPRL_BYTE_H_FORMAT
4350                  G1PPRL_BYTE_H_FORMAT,
4351                  "type", "address-range",
4352                  "used", "prev-live", "next-live", "gc-eff",
4353                  "remset", "code-roots");
4354   _out->print_cr(G1PPRL_LINE_PREFIX
4355                  G1PPRL_TYPE_H_FORMAT
4356                  G1PPRL_ADDR_BASE_H_FORMAT
4357                  G1PPRL_BYTE_H_FORMAT
4358                  G1PPRL_BYTE_H_FORMAT
4359                  G1PPRL_BYTE_H_FORMAT
4360                  G1PPRL_DOUBLE_H_FORMAT
4361                  G1PPRL_BYTE_H_FORMAT
4362                  G1PPRL_BYTE_H_FORMAT,
4363                  "", "",
4364                  "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4365                  "(bytes)", "(bytes)");
4366 }
4367 
4368 // It takes a pointer to one of the _hum_* fields, deduces the
4369 // corresponding value for a region in a humongous region series
4370 // (either the region size, or what's left if the _hum_* field
4371 // is smaller than the region size), and updates the field accordingly.
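     // For example, assuming a 1M region size: if *hum_bytes is initially
     // 2560K, three successive calls return 1024K, 1024K and 512K, after
     // which the field is zero and further calls return 0.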
4372 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4373   size_t bytes = 0;
4374   // The > 0 check is to deal with the prev and next live bytes which
4375   // could be 0.
4376   if (*hum_bytes > 0) {
4377     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4378     *hum_bytes -= bytes;
4379   }
4380   return bytes;
4381 }
4382 
4383 // It deduces the values for a region in a humongous region series
4384 // from the _hum_* fields and updates those accordingly. It assumes
4385 // that the _hum_* fields have already been set up from the "starts
4386 // humongous" region and that we visit the regions in address order.
4387 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4388                                                      size_t* capacity_bytes,
4389                                                      size_t* prev_live_bytes,
4390                                                      size_t* next_live_bytes) {
4391   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4392   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
4393   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
4394   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4395   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4396 }
4397 
4398 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4399   const char* type       = r->get_type_str();
4400   HeapWord* bottom       = r->bottom();
4401   HeapWord* end          = r->end();
4402   size_t capacity_bytes  = r->capacity();
4403   size_t used_bytes      = r->used();
4404   size_t prev_live_bytes = r->live_bytes();
4405   size_t next_live_bytes = r->next_live_bytes();
4406   double gc_eff          = r->gc_efficiency();
4407   size_t remset_bytes    = r->rem_set()->mem_size();
4408   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4409 
4410   if (r->is_starts_humongous()) {
4411     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4412            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4413            "they should have been zeroed after the last time we used them");
4414     // Set up the _hum_* fields.
4415     _hum_capacity_bytes  = capacity_bytes;
4416     _hum_used_bytes      = used_bytes;
4417     _hum_prev_live_bytes = prev_live_bytes;
4418     _hum_next_live_bytes = next_live_bytes;
4419     get_hum_bytes(&used_bytes, &capacity_bytes,
4420                   &prev_live_bytes, &next_live_bytes);
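         // In this code a "starts humongous" region's end() extends to the
         // end of the whole humongous series, so clip the printed address
         // range back to a single region; the "continues humongous" tail
         // regions get their own output lines.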
4421     end = bottom + HeapRegion::GrainWords;
4422   } else if (r->is_continues_humongous()) {
4423     get_hum_bytes(&used_bytes, &capacity_bytes,
4424                   &prev_live_bytes, &next_live_bytes);
4425     assert(end == bottom + HeapRegion::GrainWords, "invariant");
4426   }
4427 
4428   _total_used_bytes      += used_bytes;
4429   _total_capacity_bytes  += capacity_bytes;
4430   _total_prev_live_bytes += prev_live_bytes;
4431   _total_next_live_bytes += next_live_bytes;
4432   _total_remset_bytes    += remset_bytes;
4433   _total_strong_code_roots_bytes += strong_code_roots_bytes;
4434 
4435   // Print a line for this particular region.
4436   _out->print_cr(G1PPRL_LINE_PREFIX
4437                  G1PPRL_TYPE_FORMAT
4438                  G1PPRL_ADDR_BASE_FORMAT
4439                  G1PPRL_BYTE_FORMAT
4440                  G1PPRL_BYTE_FORMAT
4441                  G1PPRL_BYTE_FORMAT
4442                  G1PPRL_DOUBLE_FORMAT
4443                  G1PPRL_BYTE_FORMAT
4444                  G1PPRL_BYTE_FORMAT,
4445                  type, p2i(bottom), p2i(end),
4446                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4447                  remset_bytes, strong_code_roots_bytes);
4448 
4449   return false;
4450 }
4451 
4452 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4453   // Add the static memory usage to the remembered set sizes.
4454   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4455   // Print the footer of the output.
4456   _out->print_cr(G1PPRL_LINE_PREFIX);
4457   _out->print_cr(G1PPRL_LINE_PREFIX
4458                  " SUMMARY"
4459                  G1PPRL_SUM_MB_FORMAT("capacity")
4460                  G1PPRL_SUM_MB_PERC_FORMAT("used")
4461                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4462                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4463                  G1PPRL_SUM_MB_FORMAT("remset")
4464                  G1PPRL_SUM_MB_FORMAT("code-roots"),
4465                  bytes_to_mb(_total_capacity_bytes),
4466                  bytes_to_mb(_total_used_bytes),
4467                  perc(_total_used_bytes, _total_capacity_bytes),
4468                  bytes_to_mb(_total_prev_live_bytes),
4469                  perc(_total_prev_live_bytes, _total_capacity_bytes),
4470                  bytes_to_mb(_total_next_live_bytes),
4471                  perc(_total_next_live_bytes, _total_capacity_bytes),
4472                  bytes_to_mb(_total_remset_bytes),
4473                  bytes_to_mb(_total_strong_code_roots_bytes));
4474   _out->cr();
4475 }