1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/concurrentMark.inline.hpp"
  30 #include "gc/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc/g1/g1CollectedHeap.inline.hpp"
  32 #include "gc/g1/g1CollectorPolicy.hpp"
  33 #include "gc/g1/g1ErgoVerbose.hpp"
  34 #include "gc/g1/g1Log.hpp"
  35 #include "gc/g1/g1OopClosures.inline.hpp"
  36 #include "gc/g1/g1RemSet.hpp"
  37 #include "gc/g1/g1StringDedup.hpp"
  38 #include "gc/g1/heapRegion.inline.hpp"
  39 #include "gc/g1/heapRegionManager.inline.hpp"
  40 #include "gc/g1/heapRegionRemSet.hpp"
  41 #include "gc/g1/heapRegionSet.inline.hpp"
  42 #include "gc/shared/gcTimer.hpp"
  43 #include "gc/shared/gcTrace.hpp"
  44 #include "gc/shared/gcTraceTime.hpp"
  45 #include "gc/shared/genOopClosures.inline.hpp"
  46 #include "gc/shared/referencePolicy.hpp"
  47 #include "gc/shared/strongRootsScope.hpp"
  48 #include "gc/shared/taskqueue.inline.hpp"
  49 #include "gc/shared/vmGCOperations.hpp"
  50 #include "memory/allocation.hpp"
  51 #include "memory/resourceArea.hpp"
  52 #include "oops/oop.inline.hpp"
  53 #include "runtime/atomic.inline.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/java.hpp"
  56 #include "runtime/prefetch.inline.hpp"
  57 #include "services/memTracker.hpp"
  58 
  59 // Concurrent marking bit map wrapper
  60 
  61 CMBitMapRO::CMBitMapRO(int shifter) :
  62   _bm(),
  63   _shifter(shifter) {
  64   _bmStartWord = 0;
  65   _bmWordSize = 0;
  66 }
  67 
  68 HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  69                                                const HeapWord* limit) const {
  70   // First we must round addr *up* to a possible object boundary.
  71   addr = (HeapWord*)align_size_up((intptr_t)addr,
  72                                   HeapWordSize << _shifter);
  73   size_t addrOffset = heapWordToOffset(addr);
  74   if (limit == NULL) {
  75     limit = _bmStartWord + _bmWordSize;
  76   }
  77   size_t limitOffset = heapWordToOffset(limit);
  78   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  79   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  80   assert(nextAddr >= addr, "get_next_one postcondition");
  81   assert(nextAddr == limit || isMarked(nextAddr),
  82          "get_next_one postcondition");
  83   return nextAddr;
  84 }
  85 
  86 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
  87                                                  const HeapWord* limit) const {
  88   size_t addrOffset = heapWordToOffset(addr);
  89   if (limit == NULL) {
  90     limit = _bmStartWord + _bmWordSize;
  91   }
  92   size_t limitOffset = heapWordToOffset(limit);
  93   size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  94   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  95   assert(nextAddr >= addr, "get_next_one postcondition");
  96   assert(nextAddr == limit || !isMarked(nextAddr),
  97          "get_next_one postcondition");
  98   return nextAddr;
  99 }
 100 
 101 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
 102   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
 103   return (int) (diff >> _shifter);
 104 }
 105 
 106 #ifndef PRODUCT
 107 bool CMBitMapRO::covers(MemRegion heap_rs) const {
 108   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
 109   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
 110          "size inconsistency");
 111   return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
 112          _bmWordSize  == heap_rs.word_size();
 113 }
 114 #endif
 115 
 116 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
 117   _bm.print_on_error(st, prefix);
 118 }
 119 
 120 size_t CMBitMap::compute_size(size_t heap_size) {
 121   return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
 122 }
 123 
 124 size_t CMBitMap::mark_distance() {
 125   return MinObjAlignmentInBytes * BitsPerByte;
 126 }
 127 
 128 void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
 129   _bmStartWord = heap.start();
 130   _bmWordSize = heap.word_size();
 131 
 132   _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
 133   _bm.set_size(_bmWordSize >> _shifter);
 134 
 135   storage->set_mapping_changed_listener(&_listener);
 136 }
 137 
 138 void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
 139   if (zero_filled) {
 140     return;
 141   }
 142   // We need to clear the bitmap on commit, removing any existing information.
 143   MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
 144   _bm->clearRange(mr);
 145 }
 146 
 147 // Closure used for clearing the given mark bitmap.
 148 class ClearBitmapHRClosure : public HeapRegionClosure {
 149  private:
 150   ConcurrentMark* _cm;
 151   CMBitMap* _bitmap;
 152   bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 153  public:
 154   ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
 155     assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
 156   }
 157 
 158   virtual bool doHeapRegion(HeapRegion* r) {
 159     size_t const chunk_size_in_words = M / HeapWordSize;
 160 
 161     HeapWord* cur = r->bottom();
 162     HeapWord* const end = r->end();
 163 
 164     while (cur < end) {
 165       MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 166       _bitmap->clearRange(mr);
 167 
 168       cur += chunk_size_in_words;
 169 
 170       // Abort iteration if after yielding the marking has been aborted.
 171       if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
 172         return true;
 173       }
 174       // Repeat the asserts from before the start of the closure. We will do them
 175       // as asserts here to minimize their overhead on the product. However, we
 176       // will have them as guarantees at the beginning / end of the bitmap
 177       // clearing to get some checking in the product.
 178       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
 179       assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
 180     }
 181 
 182     return false;
 183   }
 184 };
 185 
 186 class ParClearNextMarkBitmapTask : public AbstractGangTask {
 187   ClearBitmapHRClosure* _cl;
 188   HeapRegionClaimer     _hrclaimer;
 189   bool                  _suspendible; // If the task is suspendible, workers must join the STS.
 190 
 191 public:
 192   ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
 193       _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}
 194 
 195   void work(uint worker_id) {
 196     SuspendibleThreadSetJoiner sts_join(_suspendible);
 197     G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
 198   }
 199 };
 200 
 201 void CMBitMap::clearAll() {
 202   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 203   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
 204   uint n_workers = g1h->workers()->active_workers();
 205   ParClearNextMarkBitmapTask task(&cl, n_workers, false);
 206   g1h->workers()->run_task(&task);
 207   guarantee(cl.complete(), "Must have completed iteration.");
 208   return;
 209 }
 210 
 211 void CMBitMap::markRange(MemRegion mr) {
 212   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 213   assert(!mr.is_empty(), "unexpected empty region");
 214   assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 215           ((HeapWord *) mr.end())),
 216          "markRange memory region end is not card aligned");
 217   // convert address range into offset range
 218   _bm.at_put_range(heapWordToOffset(mr.start()),
 219                    heapWordToOffset(mr.end()), true);
 220 }
 221 
 222 void CMBitMap::clearRange(MemRegion mr) {
 223   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 224   assert(!mr.is_empty(), "unexpected empty region");
 225   // convert address range into offset range
 226   _bm.at_put_range(heapWordToOffset(mr.start()),
 227                    heapWordToOffset(mr.end()), false);
 228 }
 229 
 230 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
 231                                             HeapWord* end_addr) {
 232   HeapWord* start = getNextMarkedWordAddress(addr);
 233   start = MIN2(start, end_addr);
 234   HeapWord* end   = getNextUnmarkedWordAddress(start);
 235   end = MIN2(end, end_addr);
 236   assert(start <= end, "Consistency check");
 237   MemRegion mr(start, end);
 238   if (!mr.is_empty()) {
 239     clearRange(mr);
 240   }
 241   return mr;
 242 }
 243 
 244 CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
 245   _base(NULL), _cm(cm)
 246 #ifdef ASSERT
 247   , _drain_in_progress(false)
 248   , _drain_in_progress_yields(false)
 249 #endif
 250 {}
 251 
 252 bool CMMarkStack::allocate(size_t capacity) {
 253   // allocate a stack of the requisite depth
 254   ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
 255   if (!rs.is_reserved()) {
 256     warning("ConcurrentMark MarkStack allocation failure");
 257     return false;
 258   }
 259   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 260   if (!_virtual_space.initialize(rs, rs.size())) {
 261     warning("ConcurrentMark MarkStack backing store failure");
 262     // Release the virtual memory reserved for the marking stack
 263     rs.release();
 264     return false;
 265   }
 266   assert(_virtual_space.committed_size() == rs.size(),
 267          "Didn't reserve backing store for all of ConcurrentMark stack?");
 268   _base = (oop*) _virtual_space.low();
 269   setEmpty();
 270   _capacity = (jint) capacity;
 271   _saved_index = -1;
 272   _should_expand = false;
 273   return true;
 274 }
 275 
 276 void CMMarkStack::expand() {
 277   // Called, during remark, if we've overflown the marking stack during marking.
 278   assert(isEmpty(), "stack should been emptied while handling overflow");
 279   assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
 280   // Clear expansion flag
 281   _should_expand = false;
 282   if (_capacity == (jint) MarkStackSizeMax) {
 283     if (PrintGCDetails && Verbose) {
 284       gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
 285     }
 286     return;
 287   }
 288   // Double capacity if possible
 289   jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
 290   // Do not give up existing stack until we have managed to
 291   // get the double capacity that we desired.
 292   ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
 293                                                            sizeof(oop)));
 294   if (rs.is_reserved()) {
 295     // Release the backing store associated with old stack
 296     _virtual_space.release();
 297     // Reinitialize virtual space for new stack
 298     if (!_virtual_space.initialize(rs, rs.size())) {
 299       fatal("Not enough swap for expanded marking stack capacity");
 300     }
 301     _base = (oop*)(_virtual_space.low());
 302     _index = 0;
 303     _capacity = new_capacity;
 304   } else {
 305     if (PrintGCDetails && Verbose) {
 306       // Failed to double capacity, continue;
 307       gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
 308                           SIZE_FORMAT"K to " SIZE_FORMAT"K",
 309                           _capacity / K, new_capacity / K);
 310     }
 311   }
 312 }
 313 
 314 void CMMarkStack::set_should_expand() {
 315   // If we're resetting the marking state because of an
 316   // marking stack overflow, record that we should, if
 317   // possible, expand the stack.
 318   _should_expand = _cm->has_overflown();
 319 }
 320 
 321 CMMarkStack::~CMMarkStack() {
 322   if (_base != NULL) {
 323     _base = NULL;
 324     _virtual_space.release();
 325   }
 326 }
 327 
 328 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 329   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 330   jint start = _index;
 331   jint next_index = start + n;
 332   if (next_index > _capacity) {
 333     _overflow = true;
 334     return;
 335   }
 336   // Otherwise.
 337   _index = next_index;
 338   for (int i = 0; i < n; i++) {
 339     int ind = start + i;
 340     assert(ind < _capacity, "By overflow test above.");
 341     _base[ind] = ptr_arr[i];
 342   }
 343 }
 344 
 345 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
 346   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 347   jint index = _index;
 348   if (index == 0) {
 349     *n = 0;
 350     return false;
 351   } else {
 352     int k = MIN2(max, index);
 353     jint  new_ind = index - k;
 354     for (int j = 0; j < k; j++) {
 355       ptr_arr[j] = _base[new_ind + j];
 356     }
 357     _index = new_ind;
 358     *n = k;
 359     return true;
 360   }
 361 }
 362 
 363 template<class OopClosureClass>
 364 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
 365   assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
 366          || SafepointSynchronize::is_at_safepoint(),
 367          "Drain recursion must be yield-safe.");
 368   bool res = true;
 369   debug_only(_drain_in_progress = true);
 370   debug_only(_drain_in_progress_yields = yield_after);
 371   while (!isEmpty()) {
 372     oop newOop = pop();
 373     assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
 374     assert(newOop->is_oop(), "Expected an oop");
 375     assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
 376            "only grey objects on this stack");
 377     newOop->oop_iterate(cl);
 378     if (yield_after && _cm->do_yield_check()) {
 379       res = false;
 380       break;
 381     }
 382   }
 383   debug_only(_drain_in_progress = false);
 384   return res;
 385 }
 386 
 387 void CMMarkStack::note_start_of_gc() {
 388   assert(_saved_index == -1,
 389          "note_start_of_gc()/end_of_gc() bracketed incorrectly");
 390   _saved_index = _index;
 391 }
 392 
 393 void CMMarkStack::note_end_of_gc() {
 394   // This is intentionally a guarantee, instead of an assert. If we
 395   // accidentally add something to the mark stack during GC, it
 396   // will be a correctness issue so it's better if we crash. we'll
 397   // only check this once per GC anyway, so it won't be a performance
 398   // issue in any way.
 399   guarantee(_saved_index == _index,
 400             err_msg("saved index: %d index: %d", _saved_index, _index));
 401   _saved_index = -1;
 402 }
 403 
 404 void CMMarkStack::oops_do(OopClosure* f) {
 405   assert(_saved_index == _index,
 406          err_msg("saved index: %d index: %d", _saved_index, _index));
 407   for (int i = 0; i < _index; i += 1) {
 408     f->do_oop(&_base[i]);
 409   }
 410 }
 411 
 412 CMRootRegions::CMRootRegions() :
 413   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
 414   _should_abort(false),  _next_survivor(NULL) { }
 415 
 416 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
 417   _young_list = g1h->young_list();
 418   _cm = cm;
 419 }
 420 
 421 void CMRootRegions::prepare_for_scan() {
 422   assert(!scan_in_progress(), "pre-condition");
 423 
 424   // Currently, only survivors can be root regions.
 425   assert(_next_survivor == NULL, "pre-condition");
 426   _next_survivor = _young_list->first_survivor_region();
 427   _scan_in_progress = (_next_survivor != NULL);
 428   _should_abort = false;
 429 }
 430 
 431 HeapRegion* CMRootRegions::claim_next() {
 432   if (_should_abort) {
 433     // If someone has set the should_abort flag, we return NULL to
 434     // force the caller to bail out of their loop.
 435     return NULL;
 436   }
 437 
 438   // Currently, only survivors can be root regions.
 439   HeapRegion* res = _next_survivor;
 440   if (res != NULL) {
 441     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 442     // Read it again in case it changed while we were waiting for the lock.
 443     res = _next_survivor;
 444     if (res != NULL) {
 445       if (res == _young_list->last_survivor_region()) {
 446         // We just claimed the last survivor so store NULL to indicate
 447         // that we're done.
 448         _next_survivor = NULL;
 449       } else {
 450         _next_survivor = res->get_next_young_region();
 451       }
 452     } else {
 453       // Someone else claimed the last survivor while we were trying
 454       // to take the lock so nothing else to do.
 455     }
 456   }
 457   assert(res == NULL || res->is_survivor(), "post-condition");
 458 
 459   return res;
 460 }
 461 
 462 void CMRootRegions::scan_finished() {
 463   assert(scan_in_progress(), "pre-condition");
 464 
 465   // Currently, only survivors can be root regions.
 466   if (!_should_abort) {
 467     assert(_next_survivor == NULL, "we should have claimed all survivors");
 468   }
 469   _next_survivor = NULL;
 470 
 471   {
 472     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 473     _scan_in_progress = false;
 474     RootRegionScan_lock->notify_all();
 475   }
 476 }
 477 
 478 bool CMRootRegions::wait_until_scan_finished() {
 479   if (!scan_in_progress()) return false;
 480 
 481   {
 482     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 483     while (scan_in_progress()) {
 484       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 485     }
 486   }
 487   return true;
 488 }
 489 
 490 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 491 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 492 #endif // _MSC_VER
 493 
 494 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 495   return MAX2((n_par_threads + 2) / 4, 1U);
 496 }
 497 
 498 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
 499   _g1h(g1h),
 500   _markBitMap1(),
 501   _markBitMap2(),
 502   _parallel_marking_threads(0),
 503   _max_parallel_marking_threads(0),
 504   _sleep_factor(0.0),
 505   _marking_task_overhead(1.0),
 506   _cleanup_sleep_factor(0.0),
 507   _cleanup_task_overhead(1.0),
 508   _cleanup_list("Cleanup List"),
 509   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
 510   _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
 511             CardTableModRefBS::card_shift,
 512             false /* in_resource_area*/),
 513 
 514   _prevMarkBitMap(&_markBitMap1),
 515   _nextMarkBitMap(&_markBitMap2),
 516 
 517   _markStack(this),
 518   // _finger set in set_non_marking_state
 519 
 520   _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
 521   // _active_tasks set in set_non_marking_state
 522   // _tasks set inside the constructor
 523   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
 524   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 525 
 526   _has_overflown(false),
 527   _concurrent(false),
 528   _has_aborted(false),
 529   _aborted_gc_id(GCId::undefined()),
 530   _restart_for_overflow(false),
 531   _concurrent_marking_in_progress(false),
 532 
 533   // _verbose_level set below
 534 
 535   _init_times(),
 536   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 537   _cleanup_times(),
 538   _total_counting_time(0.0),
 539   _total_rs_scrub_time(0.0),
 540 
 541   _parallel_workers(NULL),
 542 
 543   _count_card_bitmaps(NULL),
 544   _count_marked_bytes(NULL),
 545   _completed_initialization(false) {
 546   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
 547   if (verbose_level < no_verbose) {
 548     verbose_level = no_verbose;
 549   }
 550   if (verbose_level > high_verbose) {
 551     verbose_level = high_verbose;
 552   }
 553   _verbose_level = verbose_level;
 554 
 555   if (verbose_low()) {
 556     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
 557                            "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
 558   }
 559 
 560   _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 561   _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 562 
 563   // Create & start a ConcurrentMark thread.
 564   _cmThread = new ConcurrentMarkThread(this);
 565   assert(cmThread() != NULL, "CM Thread should have been created");
 566   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
 567   if (_cmThread->osthread() == NULL) {
 568       vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 569   }
 570 
 571   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 572   assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
 573   assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
 574 
 575   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 576   satb_qs.set_buffer_size(G1SATBBufferSize);
 577 
 578   _root_regions.init(_g1h, this);
 579 
 580   if (ConcGCThreads > ParallelGCThreads) {
 581     warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
 582             "than ParallelGCThreads (" UINTX_FORMAT ").",
 583             ConcGCThreads, ParallelGCThreads);
 584     return;
 585   }
 586   if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
 587     // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
 588     // if both are set
 589     _sleep_factor             = 0.0;
 590     _marking_task_overhead    = 1.0;
 591   } else if (G1MarkingOverheadPercent > 0) {
 592     // We will calculate the number of parallel marking threads based
 593     // on a target overhead with respect to the soft real-time goal
 594     double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
 595     double overall_cm_overhead =
 596       (double) MaxGCPauseMillis * marking_overhead /
 597       (double) GCPauseIntervalMillis;
 598     double cpu_ratio = 1.0 / (double) os::processor_count();
 599     double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
 600     double marking_task_overhead =
 601       overall_cm_overhead / marking_thread_num *
 602                                               (double) os::processor_count();
 603     double sleep_factor =
 604                        (1.0 - marking_task_overhead) / marking_task_overhead;
 605 
 606     FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
 607     _sleep_factor             = sleep_factor;
 608     _marking_task_overhead    = marking_task_overhead;
 609   } else {
 610     // Calculate the number of parallel marking threads by scaling
 611     // the number of parallel GC threads.
 612     uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
 613     FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
 614     _sleep_factor             = 0.0;
 615     _marking_task_overhead    = 1.0;
 616   }
 617 
 618   assert(ConcGCThreads > 0, "Should have been set");
 619   _parallel_marking_threads = (uint) ConcGCThreads;
 620   _max_parallel_marking_threads = _parallel_marking_threads;
 621 
 622   if (parallel_marking_threads() > 1) {
 623     _cleanup_task_overhead = 1.0;
 624   } else {
 625     _cleanup_task_overhead = marking_task_overhead();
 626   }
 627   _cleanup_sleep_factor =
 628                    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
 629 
 630 #if 0
 631   gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
 632   gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
 633   gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
 634   gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
 635   gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
 636 #endif
 637 
 638   _parallel_workers = new FlexibleWorkGang("G1 Marker",
 639        _max_parallel_marking_threads, false, true);
 640   if (_parallel_workers == NULL) {
 641     vm_exit_during_initialization("Failed necessary allocation.");
 642   } else {
 643     _parallel_workers->initialize_workers();
 644   }
 645 
 646   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 647     size_t mark_stack_size =
 648       MIN2(MarkStackSizeMax,
 649           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
 650     // Verify that the calculated value for MarkStackSize is in range.
 651     // It would be nice to use the private utility routine from Arguments.
 652     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 653       warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 654               "must be between 1 and " SIZE_FORMAT,
 655               mark_stack_size, MarkStackSizeMax);
 656       return;
 657     }
 658     FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
 659   } else {
 660     // Verify MarkStackSize is in range.
 661     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 662       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 663         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 664           warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
 665                   "must be between 1 and " SIZE_FORMAT,
 666                   MarkStackSize, MarkStackSizeMax);
 667           return;
 668         }
 669       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 670         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 671           warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
 672                   " or for MarkStackSizeMax (" SIZE_FORMAT ")",
 673                   MarkStackSize, MarkStackSizeMax);
 674           return;
 675         }
 676       }
 677     }
 678   }
 679 
 680   if (!_markStack.allocate(MarkStackSize)) {
 681     warning("Failed to allocate CM marking stack");
 682     return;
 683   }
 684 
 685   _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
 686   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
 687 
 688   _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
 689   _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
 690 
 691   BitMap::idx_t card_bm_size = _card_bm.size();
 692 
 693   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 694   _active_tasks = _max_worker_id;
 695 
 696   uint max_regions = _g1h->max_regions();
 697   for (uint i = 0; i < _max_worker_id; ++i) {
 698     CMTaskQueue* task_queue = new CMTaskQueue();
 699     task_queue->initialize();
 700     _task_queues->register_queue(i, task_queue);
 701 
 702     _count_card_bitmaps[i] = BitMap(card_bm_size, false);
 703     _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
 704 
 705     _tasks[i] = new CMTask(i, this,
 706                            _count_marked_bytes[i],
 707                            &_count_card_bitmaps[i],
 708                            task_queue, _task_queues);
 709 
 710     _accum_task_vtime[i] = 0.0;
 711   }
 712 
 713   // Calculate the card number for the bottom of the heap. Used
 714   // in biasing indexes into the accounting card bitmaps.
 715   _heap_bottom_card_num =
 716     intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
 717                                 CardTableModRefBS::card_shift);
 718 
 719   // Clear all the liveness counting data
 720   clear_all_count_data();
 721 
 722   // so that the call below can read a sensible value
 723   _heap_start = g1h->reserved_region().start();
 724   set_non_marking_state();
 725   _completed_initialization = true;
 726 }
 727 
 728 void ConcurrentMark::reset() {
 729   // Starting values for these two. This should be called in a STW
 730   // phase.
 731   MemRegion reserved = _g1h->g1_reserved();
 732   _heap_start = reserved.start();
 733   _heap_end   = reserved.end();
 734 
 735   // Separated the asserts so that we know which one fires.
 736   assert(_heap_start != NULL, "heap bounds should look ok");
 737   assert(_heap_end != NULL, "heap bounds should look ok");
 738   assert(_heap_start < _heap_end, "heap bounds should look ok");
 739 
 740   // Reset all the marking data structures and any necessary flags
 741   reset_marking_state();
 742 
 743   if (verbose_low()) {
 744     gclog_or_tty->print_cr("[global] resetting");
 745   }
 746 
 747   // We do reset all of them, since different phases will use
 748   // different number of active threads. So, it's easiest to have all
 749   // of them ready.
 750   for (uint i = 0; i < _max_worker_id; ++i) {
 751     _tasks[i]->reset(_nextMarkBitMap);
 752   }
 753 
 754   // we need this to make sure that the flag is on during the evac
 755   // pause with initial mark piggy-backed
 756   set_concurrent_marking_in_progress();
 757 }
 758 
 759 
 760 void ConcurrentMark::reset_marking_state(bool clear_overflow) {
 761   _markStack.set_should_expand();
 762   _markStack.setEmpty();        // Also clears the _markStack overflow flag
 763   if (clear_overflow) {
 764     clear_has_overflown();
 765   } else {
 766     assert(has_overflown(), "pre-condition");
 767   }
 768   _finger = _heap_start;
 769 
 770   for (uint i = 0; i < _max_worker_id; ++i) {
 771     CMTaskQueue* queue = _task_queues->queue(i);
 772     queue->set_empty();
 773   }
 774 }
 775 
 776 void ConcurrentMark::set_concurrency(uint active_tasks) {
 777   assert(active_tasks <= _max_worker_id, "we should not have more");
 778 
 779   _active_tasks = active_tasks;
 780   // Need to update the three data structures below according to the
 781   // number of active threads for this phase.
 782   _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
 783   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 784   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 785 }
 786 
 787 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 788   set_concurrency(active_tasks);
 789 
 790   _concurrent = concurrent;
 791   // We propagate this to all tasks, not just the active ones.
 792   for (uint i = 0; i < _max_worker_id; ++i)
 793     _tasks[i]->set_concurrent(concurrent);
 794 
 795   if (concurrent) {
 796     set_concurrent_marking_in_progress();
 797   } else {
 798     // We currently assume that the concurrent flag has been set to
 799     // false before we start remark. At this point we should also be
 800     // in a STW phase.
 801     assert(!concurrent_marking_in_progress(), "invariant");
 802     assert(out_of_regions(),
 803            err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
 804                    p2i(_finger), p2i(_heap_end)));
 805   }
 806 }
 807 
 808 void ConcurrentMark::set_non_marking_state() {
 809   // We set the global marking state to some default values when we're
 810   // not doing marking.
 811   reset_marking_state();
 812   _active_tasks = 0;
 813   clear_concurrent_marking_in_progress();
 814 }
 815 
 816 ConcurrentMark::~ConcurrentMark() {
 817   // The ConcurrentMark instance is never freed.
 818   ShouldNotReachHere();
 819 }
 820 
 821 void ConcurrentMark::clearNextBitmap() {
 822   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 823 
 824   // Make sure that the concurrent mark thread looks to still be in
 825   // the current cycle.
 826   guarantee(cmThread()->during_cycle(), "invariant");
 827 
 828   // We are finishing up the current cycle by clearing the next
 829   // marking bitmap and getting it ready for the next cycle. During
 830   // this time no other cycle can start. So, let's make sure that this
 831   // is the case.
 832   guarantee(!g1h->mark_in_progress(), "invariant");
 833 
 834   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
 835   ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
 836   _parallel_workers->run_task(&task);
 837 
 838   // Clear the liveness counting data. If the marking has been aborted, the abort()
 839   // call already did that.
 840   if (cl.complete()) {
 841     clear_all_count_data();
 842   }
 843 
 844   // Repeat the asserts from above.
 845   guarantee(cmThread()->during_cycle(), "invariant");
 846   guarantee(!g1h->mark_in_progress(), "invariant");
 847 }
 848 
 849 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 850   CMBitMap* _bitmap;
 851   bool _error;
 852  public:
 853   CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
 854   }
 855 
 856   virtual bool doHeapRegion(HeapRegion* r) {
 857     // This closure can be called concurrently to the mutator, so we must make sure
 858     // that the result of the getNextMarkedWordAddress() call is compared to the
 859     // value passed to it as limit to detect any found bits.
 860     // We can use the region's orig_end() for the limit and the comparison value
 861     // as it always contains the "real" end of the region that never changes and
 862     // has no side effects.
 863     // Due to the latter, there can also be no problem with the compiler generating
 864     // reloads of the orig_end() call.
 865     HeapWord* end = r->orig_end();
 866     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
 867   }
 868 };
 869 
 870 bool ConcurrentMark::nextMarkBitmapIsClear() {
 871   CheckBitmapClearHRClosure cl(_nextMarkBitMap);
 872   _g1h->heap_region_iterate(&cl);
 873   return cl.complete();
 874 }
 875 
 876 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 877 public:
 878   bool doHeapRegion(HeapRegion* r) {
 879     if (!r->is_continues_humongous()) {
 880       r->note_start_of_marking();
 881     }
 882     return false;
 883   }
 884 };
 885 
 886 void ConcurrentMark::checkpointRootsInitialPre() {
 887   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 888   G1CollectorPolicy* g1p = g1h->g1_policy();
 889 
 890   _has_aborted = false;
 891 
 892   // Initialize marking structures. This has to be done in a STW phase.
 893   reset();
 894 
 895   // For each region note start of marking.
 896   NoteStartOfMarkHRClosure startcl;
 897   g1h->heap_region_iterate(&startcl);
 898 }
 899 
 900 
 901 void ConcurrentMark::checkpointRootsInitialPost() {
 902   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 903 
 904   // If we force an overflow during remark, the remark operation will
 905   // actually abort and we'll restart concurrent marking. If we always
 906   // force an overflow during remark we'll never actually complete the
 907   // marking phase. So, we initialize this here, at the start of the
 908   // cycle, so that at the remaining overflow number will decrease at
 909   // every remark and we'll eventually not need to cause one.
 910   force_overflow_stw()->init();
 911 
 912   // Start Concurrent Marking weak-reference discovery.
 913   ReferenceProcessor* rp = g1h->ref_processor_cm();
 914   // enable ("weak") refs discovery
 915   rp->enable_discovery();
 916   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 917 
 918   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 919   // This is the start of  the marking cycle, we're expected all
 920   // threads to have SATB queues with active set to false.
 921   satb_mq_set.set_active_all_threads(true, /* new active value */
 922                                      false /* expected_active */);
 923 
 924   _root_regions.prepare_for_scan();
 925 
 926   // update_g1_committed() will be called at the end of an evac pause
 927   // when marking is on. So, it's also called at the end of the
 928   // initial-mark pause to update the heap end, if the heap expands
 929   // during it. No need to call it here.
 930 }
 931 
 932 /*
 933  * Notice that in the next two methods, we actually leave the STS
 934  * during the barrier sync and join it immediately afterwards. If we
 935  * do not do this, the following deadlock can occur: one thread could
 936  * be in the barrier sync code, waiting for the other thread to also
 937  * sync up, whereas another one could be trying to yield, while also
 938  * waiting for the other threads to sync up too.
 939  *
 940  * Note, however, that this code is also used during remark and in
 941  * this case we should not attempt to leave / enter the STS, otherwise
 942  * we'll either hit an assert (debug / fastdebug) or deadlock
 943  * (product). So we should only leave / enter the STS if we are
 944  * operating concurrently.
 945  *
 946  * Because the thread that does the sync barrier has left the STS, it
 947  * is possible to be suspended for a Full GC or an evacuation pause
 948  * could occur. This is actually safe, since the entering the sync
 949  * barrier is one of the last things do_marking_step() does, and it
 950  * doesn't manipulate any data structures afterwards.
 951  */
 952 
 953 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 954   bool barrier_aborted;
 955 
 956   if (verbose_low()) {
 957     gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
 958   }
 959 
 960   {
 961     SuspendibleThreadSetLeaver sts_leave(concurrent());
 962     barrier_aborted = !_first_overflow_barrier_sync.enter();
 963   }
 964 
 965   // at this point everyone should have synced up and not be doing any
 966   // more work
 967 
 968   if (verbose_low()) {
 969     if (barrier_aborted) {
 970       gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
 971     } else {
 972       gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
 973     }
 974   }
 975 
 976   if (barrier_aborted) {
 977     // If the barrier aborted we ignore the overflow condition and
 978     // just abort the whole marking phase as quickly as possible.
 979     return;
 980   }
 981 
 982   // If we're executing the concurrent phase of marking, reset the marking
 983   // state; otherwise the marking state is reset after reference processing,
 984   // during the remark pause.
 985   // If we reset here as a result of an overflow during the remark we will
 986   // see assertion failures from any subsequent set_concurrency_and_phase()
 987   // calls.
 988   if (concurrent()) {
 989     // let the task associated with with worker 0 do this
 990     if (worker_id == 0) {
 991       // task 0 is responsible for clearing the global data structures
 992       // We should be here because of an overflow. During STW we should
 993       // not clear the overflow flag since we rely on it being true when
 994       // we exit this method to abort the pause and restart concurrent
 995       // marking.
 996       reset_marking_state(true /* clear_overflow */);
 997       force_overflow()->update();
 998 
 999       if (G1Log::fine()) {
1000         gclog_or_tty->gclog_stamp(concurrent_gc_id());
1001         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
1002       }
1003     }
1004   }
1005 
1006   // after this, each task should reset its own data structures then
1007   // then go into the second barrier
1008 }
1009 
1010 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
1011   bool barrier_aborted;
1012 
1013   if (verbose_low()) {
1014     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1015   }
1016 
1017   {
1018     SuspendibleThreadSetLeaver sts_leave(concurrent());
1019     barrier_aborted = !_second_overflow_barrier_sync.enter();
1020   }
1021 
1022   // at this point everything should be re-initialized and ready to go
1023 
1024   if (verbose_low()) {
1025     if (barrier_aborted) {
1026       gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
1027     } else {
1028       gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
1029     }
1030   }
1031 }
1032 
1033 #ifndef PRODUCT
1034 void ForceOverflowSettings::init() {
1035   _num_remaining = G1ConcMarkForceOverflow;
1036   _force = false;
1037   update();
1038 }
1039 
1040 void ForceOverflowSettings::update() {
1041   if (_num_remaining > 0) {
1042     _num_remaining -= 1;
1043     _force = true;
1044   } else {
1045     _force = false;
1046   }
1047 }
1048 
1049 bool ForceOverflowSettings::should_force() {
1050   if (_force) {
1051     _force = false;
1052     return true;
1053   } else {
1054     return false;
1055   }
1056 }
1057 #endif // !PRODUCT
1058 
1059 class CMConcurrentMarkingTask: public AbstractGangTask {
1060 private:
1061   ConcurrentMark*       _cm;
1062   ConcurrentMarkThread* _cmt;
1063 
1064 public:
1065   void work(uint worker_id) {
1066     assert(Thread::current()->is_ConcurrentGC_thread(),
1067            "this should only be done by a conc GC thread");
1068     ResourceMark rm;
1069 
1070     double start_vtime = os::elapsedVTime();
1071 
1072     {
1073       SuspendibleThreadSetJoiner sts_join;
1074 
1075       assert(worker_id < _cm->active_tasks(), "invariant");
1076       CMTask* the_task = _cm->task(worker_id);
1077       the_task->record_start_time();
1078       if (!_cm->has_aborted()) {
1079         do {
1080           double start_vtime_sec = os::elapsedVTime();
1081           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1082 
1083           the_task->do_marking_step(mark_step_duration_ms,
1084                                     true  /* do_termination */,
1085                                     false /* is_serial*/);
1086 
1087           double end_vtime_sec = os::elapsedVTime();
1088           double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
1089           _cm->clear_has_overflown();
1090 
1091           _cm->do_yield_check(worker_id);
1092 
1093           jlong sleep_time_ms;
1094           if (!_cm->has_aborted() && the_task->has_aborted()) {
1095             sleep_time_ms =
1096               (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
1097             {
1098               SuspendibleThreadSetLeaver sts_leave;
1099               os::sleep(Thread::current(), sleep_time_ms, false);
1100             }
1101           }
1102         } while (!_cm->has_aborted() && the_task->has_aborted());
1103       }
1104       the_task->record_end_time();
1105       guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1106     }
1107 
1108     double end_vtime = os::elapsedVTime();
1109     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
1110   }
1111 
1112   CMConcurrentMarkingTask(ConcurrentMark* cm,
1113                           ConcurrentMarkThread* cmt) :
1114       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1115 
1116   ~CMConcurrentMarkingTask() { }
1117 };
1118 
1119 // Calculates the number of active workers for a concurrent
1120 // phase.
1121 uint ConcurrentMark::calc_parallel_marking_threads() {
1122   uint n_conc_workers = 0;
1123   if (!UseDynamicNumberOfGCThreads ||
1124       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
1125        !ForceDynamicNumberOfGCThreads)) {
1126     n_conc_workers = max_parallel_marking_threads();
1127   } else {
1128     n_conc_workers =
1129       AdaptiveSizePolicy::calc_default_active_workers(
1130                                    max_parallel_marking_threads(),
1131                                    1, /* Minimum workers */
1132                                    parallel_marking_threads(),
1133                                    Threads::number_of_non_daemon_threads());
1134     // Don't scale down "n_conc_workers" by scale_parallel_threads() because
1135     // that scaling has already gone into "_max_parallel_marking_threads".
1136   }
1137   assert(n_conc_workers > 0, "Always need at least 1");
1138   return n_conc_workers;
1139 }
1140 
1141 void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
1142   // Currently, only survivors can be root regions.
1143   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
1144   G1RootRegionScanClosure cl(_g1h, this, worker_id);
1145 
1146   const uintx interval = PrefetchScanIntervalInBytes;
1147   HeapWord* curr = hr->bottom();
1148   const HeapWord* end = hr->top();
1149   while (curr < end) {
1150     Prefetch::read(curr, interval);
1151     oop obj = oop(curr);
1152     int size = obj->oop_iterate(&cl);
1153     assert(size == obj->size(), "sanity");
1154     curr += size;
1155   }
1156 }
1157 
1158 class CMRootRegionScanTask : public AbstractGangTask {
1159 private:
1160   ConcurrentMark* _cm;
1161 
1162 public:
1163   CMRootRegionScanTask(ConcurrentMark* cm) :
1164     AbstractGangTask("Root Region Scan"), _cm(cm) { }
1165 
1166   void work(uint worker_id) {
1167     assert(Thread::current()->is_ConcurrentGC_thread(),
1168            "this should only be done by a conc GC thread");
1169 
1170     CMRootRegions* root_regions = _cm->root_regions();
1171     HeapRegion* hr = root_regions->claim_next();
1172     while (hr != NULL) {
1173       _cm->scanRootRegion(hr, worker_id);
1174       hr = root_regions->claim_next();
1175     }
1176   }
1177 };
1178 
1179 void ConcurrentMark::scanRootRegions() {
1180   // Start of concurrent marking.
1181   ClassLoaderDataGraph::clear_claimed_marks();
1182 
1183   // scan_in_progress() will have been set to true only if there was
1184   // at least one root region to scan. So, if it's false, we
1185   // should not attempt to do any further work.
1186   if (root_regions()->scan_in_progress()) {
1187     _parallel_marking_threads = calc_parallel_marking_threads();
1188     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1189            "Maximum number of marking threads exceeded");
1190     uint active_workers = MAX2(1U, parallel_marking_threads());
1191 
1192     CMRootRegionScanTask task(this);
1193     _parallel_workers->set_active_workers(active_workers);
1194     _parallel_workers->run_task(&task);
1195 
1196     // It's possible that has_aborted() is true here without actually
1197     // aborting the survivor scan earlier. This is OK as it's
1198     // mainly used for sanity checking.
1199     root_regions()->scan_finished();
1200   }
1201 }
1202 
1203 void ConcurrentMark::markFromRoots() {
1204   // we might be tempted to assert that:
1205   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1206   //        "inconsistent argument?");
1207   // However that wouldn't be right, because it's possible that
1208   // a safepoint is indeed in progress as a younger generation
1209   // stop-the-world GC happens even as we mark in this generation.
1210 
1211   _restart_for_overflow = false;
1212   force_overflow_conc()->init();
1213 
1214   // _g1h has _n_par_threads
1215   _parallel_marking_threads = calc_parallel_marking_threads();
1216   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1217     "Maximum number of marking threads exceeded");
1218 
1219   uint active_workers = MAX2(1U, parallel_marking_threads());
1220 
1221   // Parallel task terminator is set in "set_concurrency_and_phase()"
1222   set_concurrency_and_phase(active_workers, true /* concurrent */);
1223 
1224   CMConcurrentMarkingTask markingTask(this, cmThread());
1225   _parallel_workers->set_active_workers(active_workers);
1226   // Don't set _n_par_threads because it affects MT in process_roots()
1227   // and the decisions on that MT processing is made elsewhere.
1228   assert(_parallel_workers->active_workers() > 0, "Should have been set");
1229   _parallel_workers->run_task(&markingTask);
1230   print_stats();
1231 }
1232 
1233 // Helper class to get rid of some boilerplate code.
1234 class G1CMTraceTime : public GCTraceTime {
1235   static bool doit_and_prepend(bool doit) {
1236     if (doit) {
1237       gclog_or_tty->put(' ');
1238     }
1239     return doit;
1240   }
1241 
1242  public:
1243   G1CMTraceTime(const char* title, bool doit)
1244     : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
1245         G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
1246   }
1247 };
1248 
1249 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1250   // world is stopped at this checkpoint
1251   assert(SafepointSynchronize::is_at_safepoint(),
1252          "world should be stopped");
1253 
1254   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1255 
1256   // If a full collection has happened, we shouldn't do this.
1257   if (has_aborted()) {
1258     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1259     return;
1260   }
1261 
1262   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1263 
1264   if (VerifyDuringGC) {
1265     HandleMark hm;  // handle scope
1266     g1h->prepare_for_verify();
1267     Universe::verify(VerifyOption_G1UsePrevMarking,
1268                      " VerifyDuringGC:(before)");
1269   }
1270   g1h->check_bitmaps("Remark Start");
1271 
1272   G1CollectorPolicy* g1p = g1h->g1_policy();
1273   g1p->record_concurrent_mark_remark_start();
1274 
1275   double start = os::elapsedTime();
1276 
1277   checkpointRootsFinalWork();
1278 
1279   double mark_work_end = os::elapsedTime();
1280 
1281   weakRefsWork(clear_all_soft_refs);
1282 
1283   if (has_overflown()) {
1284     // Oops.  We overflowed.  Restart concurrent marking.
1285     _restart_for_overflow = true;
1286     if (G1TraceMarkStackOverflow) {
1287       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1288     }
1289 
1290     // Verify the heap w.r.t. the previous marking bitmap.
1291     if (VerifyDuringGC) {
1292       HandleMark hm;  // handle scope
1293       g1h->prepare_for_verify();
1294       Universe::verify(VerifyOption_G1UsePrevMarking,
1295                        " VerifyDuringGC:(overflow)");
1296     }
1297 
1298     // Clear the marking state because we will be restarting
1299     // marking due to overflowing the global mark stack.
1300     reset_marking_state();
1301   } else {
1302     {
1303       G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
1304 
1305       // Aggregate the per-task counting data that we have accumulated
1306       // while marking.
1307       aggregate_count_data();
1308     }
1309 
1310     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1311     // We're done with marking.
1312     // This is the end of  the marking cycle, we're expected all
1313     // threads to have SATB queues with active set to true.
1314     satb_mq_set.set_active_all_threads(false, /* new active value */
1315                                        true /* expected_active */);
1316 
1317     if (VerifyDuringGC) {
1318       HandleMark hm;  // handle scope
1319       g1h->prepare_for_verify();
1320       Universe::verify(VerifyOption_G1UseNextMarking,
1321                        " VerifyDuringGC:(after)");
1322     }
1323     g1h->check_bitmaps("Remark End");
1324     assert(!restart_for_overflow(), "sanity");
1325     // Completely reset the marking state since marking completed
1326     set_non_marking_state();
1327   }
1328 
1329   // Expand the marking stack, if we have to and if we can.
1330   if (_markStack.should_expand()) {
1331     _markStack.expand();
1332   }
1333 
1334   // Statistics
1335   double now = os::elapsedTime();
1336   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1337   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1338   _remark_times.add((now - start) * 1000.0);
1339 
1340   g1p->record_concurrent_mark_remark_end();
1341 
1342   G1CMIsAliveClosure is_alive(g1h);
1343   g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1344 }
1345 
1346 // Base class of the closures that finalize and verify the
1347 // liveness counting data.
1348 class CMCountDataClosureBase: public HeapRegionClosure {
1349 protected:
1350   G1CollectedHeap* _g1h;
1351   ConcurrentMark* _cm;
1352   CardTableModRefBS* _ct_bs;
1353 
1354   BitMap* _region_bm;
1355   BitMap* _card_bm;
1356 
1357   // Takes a region that's not empty (i.e., it has at least one
1358   // live object in it and sets its corresponding bit on the region
1359   // bitmap to 1. If the region is "starts humongous" it will also set
1360   // to 1 the bits on the region bitmap that correspond to its
1361   // associated "continues humongous" regions.
1362   void set_bit_for_region(HeapRegion* hr) {
1363     assert(!hr->is_continues_humongous(), "should have filtered those out");
1364 
1365     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1366     if (!hr->is_starts_humongous()) {
1367       // Normal (non-humongous) case: just set the bit.
1368       _region_bm->par_at_put(index, true);
1369     } else {
1370       // Starts humongous case: calculate how many regions are part of
1371       // this humongous region and then set the bit range.
1372       BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
1373       _region_bm->par_at_put_range(index, end_index, true);
1374     }
1375   }
1376 
1377 public:
1378   CMCountDataClosureBase(G1CollectedHeap* g1h,
1379                          BitMap* region_bm, BitMap* card_bm):
1380     _g1h(g1h), _cm(g1h->concurrent_mark()),
1381     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
1382     _region_bm(region_bm), _card_bm(card_bm) { }
1383 };
1384 
1385 // Closure that calculates the # live objects per region. Used
1386 // for verification purposes during the cleanup pause.
1387 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1388   CMBitMapRO* _bm;
1389   size_t _region_marked_bytes;
1390 
1391 public:
1392   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1393                          BitMap* region_bm, BitMap* card_bm) :
1394     CMCountDataClosureBase(g1h, region_bm, card_bm),
1395     _bm(bm), _region_marked_bytes(0) { }
1396 
1397   bool doHeapRegion(HeapRegion* hr) {
1398 
1399     if (hr->is_continues_humongous()) {
1400       // We will ignore these here and process them when their
1401       // associated "starts humongous" region is processed (see
1402       // set_bit_for_heap_region()). Note that we cannot rely on their
1403       // associated "starts humongous" region to have their bit set to
1404       // 1 since, due to the region chunking in the parallel region
1405       // iteration, a "continues humongous" region might be visited
1406       // before its associated "starts humongous".
1407       return false;
1408     }
1409 
1410     HeapWord* ntams = hr->next_top_at_mark_start();
1411     HeapWord* start = hr->bottom();
1412 
1413     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1414            err_msg("Preconditions not met - "
1415                    "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1416                    p2i(start), p2i(ntams), p2i(hr->end())));
1417 
1418     // Find the first marked object at or after "start".
1419     start = _bm->getNextMarkedWordAddress(start, ntams);
1420 
1421     size_t marked_bytes = 0;
1422 
1423     while (start < ntams) {
1424       oop obj = oop(start);
1425       int obj_sz = obj->size();
1426       HeapWord* obj_end = start + obj_sz;
1427 
1428       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1429       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1430 
1431       // Note: if we're looking at the last region in heap - obj_end
1432       // could be actually just beyond the end of the heap; end_idx
1433       // will then correspond to a (non-existent) card that is also
1434       // just beyond the heap.
1435       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1436         // end of object is not card aligned - increment to cover
1437         // all the cards spanned by the object
1438         end_idx += 1;
1439       }
1440 
1441       // Set the bits in the card BM for the cards spanned by this object.
1442       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1443 
1444       // Add the size of this object to the number of marked bytes.
1445       marked_bytes += (size_t)obj_sz * HeapWordSize;
1446 
1447       // Find the next marked object after this one.
1448       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1449     }
1450 
1451     // Mark the allocated-since-marking portion...
1452     HeapWord* top = hr->top();
1453     if (ntams < top) {
1454       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1455       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1456 
1457       // Note: if we're looking at the last region in heap - top
1458       // could be actually just beyond the end of the heap; end_idx
1459       // will then correspond to a (non-existent) card that is also
1460       // just beyond the heap.
1461       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1462         // end of object is not card aligned - increment to cover
1463         // all the cards spanned by the object
1464         end_idx += 1;
1465       }
1466       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1467 
1468       // This definitely means the region has live objects.
1469       set_bit_for_region(hr);
1470     }
1471 
1472     // Update the live region bitmap.
1473     if (marked_bytes > 0) {
1474       set_bit_for_region(hr);
1475     }
1476 
1477     // Set the marked bytes for the current region so that
1478     // it can be queried by a calling verification routine
1479     _region_marked_bytes = marked_bytes;
1480 
1481     return false;
1482   }
1483 
1484   size_t region_marked_bytes() const { return _region_marked_bytes; }
1485 };
1486 
1487 // Heap region closure used for verifying the counting data
1488 // that was accumulated concurrently and aggregated during
1489 // the remark pause. This closure is applied to the heap
1490 // regions during the STW cleanup pause.
1491 
1492 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1493   G1CollectedHeap* _g1h;
1494   ConcurrentMark* _cm;
1495   CalcLiveObjectsClosure _calc_cl;
1496   BitMap* _region_bm;   // Region BM to be verified
1497   BitMap* _card_bm;     // Card BM to be verified
1498   bool _verbose;        // verbose output?
1499 
1500   BitMap* _exp_region_bm; // Expected Region BM values
1501   BitMap* _exp_card_bm;   // Expected card BM values
1502 
1503   int _failures;
1504 
1505 public:
1506   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1507                                 BitMap* region_bm,
1508                                 BitMap* card_bm,
1509                                 BitMap* exp_region_bm,
1510                                 BitMap* exp_card_bm,
1511                                 bool verbose) :
1512     _g1h(g1h), _cm(g1h->concurrent_mark()),
1513     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1514     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1515     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1516     _failures(0) { }
1517 
1518   int failures() const { return _failures; }
1519 
1520   bool doHeapRegion(HeapRegion* hr) {
1521     if (hr->is_continues_humongous()) {
1522       // We will ignore these here and process them when their
1523       // associated "starts humongous" region is processed (see
1524       // set_bit_for_heap_region()). Note that we cannot rely on their
1525       // associated "starts humongous" region to have their bit set to
1526       // 1 since, due to the region chunking in the parallel region
1527       // iteration, a "continues humongous" region might be visited
1528       // before its associated "starts humongous".
1529       return false;
1530     }
1531 
1532     int failures = 0;
1533 
1534     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1535     // this region and set the corresponding bits in the expected region
1536     // and card bitmaps.
1537     bool res = _calc_cl.doHeapRegion(hr);
1538     assert(res == false, "should be continuing");
1539 
1540     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1541                     Mutex::_no_safepoint_check_flag);
1542 
1543     // Verify the marked bytes for this region.
1544     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1545     size_t act_marked_bytes = hr->next_marked_bytes();
1546 
1547     // We're not OK if expected marked bytes > actual marked bytes. It means
1548     // we have missed accounting some objects during the actual marking.
1549     if (exp_marked_bytes > act_marked_bytes) {
1550       if (_verbose) {
1551         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1552                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1553                                hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
1554       }
1555       failures += 1;
1556     }
1557 
1558     // Verify the bit, for this region, in the actual and expected
1559     // (which was just calculated) region bit maps.
1560     // We're not OK if the bit in the calculated expected region
1561     // bitmap is set and the bit in the actual region bitmap is not.
1562     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1563 
1564     bool expected = _exp_region_bm->at(index);
1565     bool actual = _region_bm->at(index);
1566     if (expected && !actual) {
1567       if (_verbose) {
1568         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1569                                "expected: %s, actual: %s",
1570                                hr->hrm_index(),
1571                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1572       }
1573       failures += 1;
1574     }
1575 
1576     // Verify that the card bit maps for the cards spanned by the current
1577     // region match. We have an error if we have a set bit in the expected
1578     // bit map and the corresponding bit in the actual bitmap is not set.
1579 
1580     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1581     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1582 
1583     for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
1584       expected = _exp_card_bm->at(i);
1585       actual = _card_bm->at(i);
1586 
1587       if (expected && !actual) {
1588         if (_verbose) {
1589           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1590                                  "expected: %s, actual: %s",
1591                                  hr->hrm_index(), i,
1592                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1593         }
1594         failures += 1;
1595       }
1596     }
1597 
1598     if (failures > 0 && _verbose)  {
1599       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1600                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1601                              HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
1602                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1603     }
1604 
1605     _failures += failures;
1606 
1607     // We could stop iteration over the heap when we
1608     // find the first violating region by returning true.
1609     return false;
1610   }
1611 };
1612 
1613 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1614 protected:
1615   G1CollectedHeap* _g1h;
1616   ConcurrentMark* _cm;
1617   BitMap* _actual_region_bm;
1618   BitMap* _actual_card_bm;
1619 
1620   uint    _n_workers;
1621 
1622   BitMap* _expected_region_bm;
1623   BitMap* _expected_card_bm;
1624 
1625   int  _failures;
1626   bool _verbose;
1627 
1628   HeapRegionClaimer _hrclaimer;
1629 
1630 public:
1631   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1632                             BitMap* region_bm, BitMap* card_bm,
1633                             BitMap* expected_region_bm, BitMap* expected_card_bm)
1634     : AbstractGangTask("G1 verify final counting"),
1635       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1636       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1637       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1638       _failures(0), _verbose(false),
1639       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1640     assert(VerifyDuringGC, "don't call this otherwise");
1641     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1642     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1643 
1644     _verbose = _cm->verbose_medium();
1645   }
1646 
1647   void work(uint worker_id) {
1648     assert(worker_id < _n_workers, "invariant");
1649 
1650     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1651                                             _actual_region_bm, _actual_card_bm,
1652                                             _expected_region_bm,
1653                                             _expected_card_bm,
1654                                             _verbose);
1655 
1656     _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);
1657 
1658     Atomic::add(verify_cl.failures(), &_failures);
1659   }
1660 
1661   int failures() const { return _failures; }
1662 };
1663 
1664 // Closure that finalizes the liveness counting data.
1665 // Used during the cleanup pause.
1666 // Sets the bits corresponding to the interval [NTAMS, top]
1667 // (which contains the implicitly live objects) in the
1668 // card liveness bitmap. Also sets the bit for each region,
1669 // containing live data, in the region liveness bitmap.
1670 
1671 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1672  public:
1673   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1674                               BitMap* region_bm,
1675                               BitMap* card_bm) :
1676     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1677 
1678   bool doHeapRegion(HeapRegion* hr) {
1679 
1680     if (hr->is_continues_humongous()) {
1681       // We will ignore these here and process them when their
1682       // associated "starts humongous" region is processed (see
1683       // set_bit_for_heap_region()). Note that we cannot rely on their
1684       // associated "starts humongous" region to have their bit set to
1685       // 1 since, due to the region chunking in the parallel region
1686       // iteration, a "continues humongous" region might be visited
1687       // before its associated "starts humongous".
1688       return false;
1689     }
1690 
1691     HeapWord* ntams = hr->next_top_at_mark_start();
1692     HeapWord* top   = hr->top();
1693 
1694     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1695 
1696     // Mark the allocated-since-marking portion...
1697     if (ntams < top) {
1698       // This definitely means the region has live objects.
1699       set_bit_for_region(hr);
1700 
1701       // Now set the bits in the card bitmap for [ntams, top)
1702       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1703       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1704 
1705       // Note: if we're looking at the last region in heap - top
1706       // could be actually just beyond the end of the heap; end_idx
1707       // will then correspond to a (non-existent) card that is also
1708       // just beyond the heap.
1709       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1710         // end of object is not card aligned - increment to cover
1711         // all the cards spanned by the object
1712         end_idx += 1;
1713       }
1714 
1715       assert(end_idx <= _card_bm->size(),
1716              err_msg("oob: end_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1717                      end_idx, _card_bm->size()));
1718       assert(start_idx < _card_bm->size(),
1719              err_msg("oob: start_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1720                      start_idx, _card_bm->size()));
1721 
1722       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1723     }
1724 
1725     // Set the bit for the region if it contains live data
1726     if (hr->next_marked_bytes() > 0) {
1727       set_bit_for_region(hr);
1728     }
1729 
1730     return false;
1731   }
1732 };
1733 
1734 class G1ParFinalCountTask: public AbstractGangTask {
1735 protected:
1736   G1CollectedHeap* _g1h;
1737   ConcurrentMark* _cm;
1738   BitMap* _actual_region_bm;
1739   BitMap* _actual_card_bm;
1740 
1741   uint    _n_workers;
1742   HeapRegionClaimer _hrclaimer;
1743 
1744 public:
1745   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1746     : AbstractGangTask("G1 final counting"),
1747       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1748       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1749       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1750   }
1751 
1752   void work(uint worker_id) {
1753     assert(worker_id < _n_workers, "invariant");
1754 
1755     FinalCountDataUpdateClosure final_update_cl(_g1h,
1756                                                 _actual_region_bm,
1757                                                 _actual_card_bm);
1758 
1759     _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
1760   }
1761 };
1762 
1763 class G1ParNoteEndTask;
1764 
1765 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1766   G1CollectedHeap* _g1;
1767   size_t _max_live_bytes;
1768   uint _regions_claimed;
1769   size_t _freed_bytes;
1770   FreeRegionList* _local_cleanup_list;
1771   HeapRegionSetCount _old_regions_removed;
1772   HeapRegionSetCount _humongous_regions_removed;
1773   HRRSCleanupTask* _hrrs_cleanup_task;
1774   double _claimed_region_time;
1775   double _max_region_time;
1776 
1777 public:
1778   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1779                              FreeRegionList* local_cleanup_list,
1780                              HRRSCleanupTask* hrrs_cleanup_task) :
1781     _g1(g1),
1782     _max_live_bytes(0), _regions_claimed(0),
1783     _freed_bytes(0),
1784     _claimed_region_time(0.0), _max_region_time(0.0),
1785     _local_cleanup_list(local_cleanup_list),
1786     _old_regions_removed(),
1787     _humongous_regions_removed(),
1788     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1789 
1790   size_t freed_bytes() { return _freed_bytes; }
1791   const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1792   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1793 
1794   bool doHeapRegion(HeapRegion *hr) {
1795     if (hr->is_continues_humongous()) {
1796       return false;
1797     }
1798     // We use a claim value of zero here because all regions
1799     // were claimed with value 1 in the FinalCount task.
1800     _g1->reset_gc_time_stamps(hr);
1801     double start = os::elapsedTime();
1802     _regions_claimed++;
1803     hr->note_end_of_marking();
1804     _max_live_bytes += hr->max_live_bytes();
1805 
1806     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1807       _freed_bytes += hr->used();
1808       hr->set_containing_set(NULL);
1809       if (hr->is_humongous()) {
1810         assert(hr->is_starts_humongous(), "we should only see starts humongous");
1811         _humongous_regions_removed.increment(1u, hr->capacity());
1812         _g1->free_humongous_region(hr, _local_cleanup_list, true);
1813       } else {
1814         _old_regions_removed.increment(1u, hr->capacity());
1815         _g1->free_region(hr, _local_cleanup_list, true);
1816       }
1817     } else {
1818       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1819     }
1820 
1821     double region_time = (os::elapsedTime() - start);
1822     _claimed_region_time += region_time;
1823     if (region_time > _max_region_time) {
1824       _max_region_time = region_time;
1825     }
1826     return false;
1827   }
1828 
1829   size_t max_live_bytes() { return _max_live_bytes; }
1830   uint regions_claimed() { return _regions_claimed; }
1831   double claimed_region_time_sec() { return _claimed_region_time; }
1832   double max_region_time_sec() { return _max_region_time; }
1833 };
1834 
1835 class G1ParNoteEndTask: public AbstractGangTask {
1836   friend class G1NoteEndOfConcMarkClosure;
1837 
1838 protected:
1839   G1CollectedHeap* _g1h;
1840   size_t _max_live_bytes;
1841   size_t _freed_bytes;
1842   FreeRegionList* _cleanup_list;
1843   HeapRegionClaimer _hrclaimer;
1844 
1845 public:
1846   G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1847       AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
1848   }
1849 
1850   void work(uint worker_id) {
1851     FreeRegionList local_cleanup_list("Local Cleanup List");
1852     HRRSCleanupTask hrrs_cleanup_task;
1853     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1854                                            &hrrs_cleanup_task);
1855     _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
1856     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1857 
1858     // Now update the lists
1859     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1860     {
1861       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1862       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1863       _max_live_bytes += g1_note_end.max_live_bytes();
1864       _freed_bytes += g1_note_end.freed_bytes();
1865 
1866       // If we iterate over the global cleanup list at the end of
1867       // cleanup to do this printing we will not guarantee to only
1868       // generate output for the newly-reclaimed regions (the list
1869       // might not be empty at the beginning of cleanup; we might
1870       // still be working on its previous contents). So we do the
1871       // printing here, before we append the new regions to the global
1872       // cleanup list.
1873 
1874       G1HRPrinter* hr_printer = _g1h->hr_printer();
1875       if (hr_printer->is_active()) {
1876         FreeRegionListIterator iter(&local_cleanup_list);
1877         while (iter.more_available()) {
1878           HeapRegion* hr = iter.get_next();
1879           hr_printer->cleanup(hr);
1880         }
1881       }
1882 
1883       _cleanup_list->add_ordered(&local_cleanup_list);
1884       assert(local_cleanup_list.is_empty(), "post-condition");
1885 
1886       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1887     }
1888   }
1889   size_t max_live_bytes() { return _max_live_bytes; }
1890   size_t freed_bytes() { return _freed_bytes; }
1891 };
1892 
1893 class G1ParScrubRemSetTask: public AbstractGangTask {
1894 protected:
1895   G1RemSet* _g1rs;
1896   BitMap* _region_bm;
1897   BitMap* _card_bm;
1898   HeapRegionClaimer _hrclaimer;
1899 
1900 public:
1901   G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
1902       AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) {
1903   }
1904 
1905   void work(uint worker_id) {
1906     _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer);
1907   }
1908 
1909 };
1910 
1911 void ConcurrentMark::cleanup() {
1912   // world is stopped at this checkpoint
1913   assert(SafepointSynchronize::is_at_safepoint(),
1914          "world should be stopped");
1915   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1916 
1917   // If a full collection has happened, we shouldn't do this.
1918   if (has_aborted()) {
1919     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1920     return;
1921   }
1922 
1923   g1h->verify_region_sets_optional();
1924 
1925   if (VerifyDuringGC) {
1926     HandleMark hm;  // handle scope
1927     g1h->prepare_for_verify();
1928     Universe::verify(VerifyOption_G1UsePrevMarking,
1929                      " VerifyDuringGC:(before)");
1930   }
1931   g1h->check_bitmaps("Cleanup Start");
1932 
1933   G1CollectorPolicy* g1p = g1h->g1_policy();
1934   g1p->record_concurrent_mark_cleanup_start();
1935 
1936   double start = os::elapsedTime();
1937 
1938   HeapRegionRemSet::reset_for_cleanup_tasks();
1939 
1940   // Do counting once more with the world stopped for good measure.
1941   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1942 
1943   g1h->set_par_threads();
1944   uint n_workers = _g1h->workers()->active_workers();
1945   g1h->workers()->run_task(&g1_par_count_task);
1946   // Done with the parallel phase so reset to 0.
1947   g1h->set_par_threads(0);
1948 
1949   if (VerifyDuringGC) {
1950     // Verify that the counting data accumulated during marking matches
1951     // that calculated by walking the marking bitmap.
1952 
1953     // Bitmaps to hold expected values
1954     BitMap expected_region_bm(_region_bm.size(), true);
1955     BitMap expected_card_bm(_card_bm.size(), true);
1956 
1957     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1958                                                  &_region_bm,
1959                                                  &_card_bm,
1960                                                  &expected_region_bm,
1961                                                  &expected_card_bm);
1962 
1963     g1h->set_par_threads((int)n_workers);
1964     g1h->workers()->run_task(&g1_par_verify_task);
1965     // Done with the parallel phase so reset to 0.
1966     g1h->set_par_threads(0);
1967 
1968     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
1969   }
1970 
1971   size_t start_used_bytes = g1h->used();
1972   g1h->set_marking_complete();
1973 
1974   double count_end = os::elapsedTime();
1975   double this_final_counting_time = (count_end - start);
1976   _total_counting_time += this_final_counting_time;
1977 
1978   if (G1PrintRegionLivenessInfo) {
1979     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
1980     _g1h->heap_region_iterate(&cl);
1981   }
1982 
1983   // Install newly created mark bitMap as "prev".
1984   swapMarkBitMaps();
1985 
1986   g1h->reset_gc_time_stamp();
1987 
1988   // Note end of marking in all heap regions.
1989   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1990   g1h->set_par_threads((int)n_workers);
1991   g1h->workers()->run_task(&g1_par_note_end_task);
1992   g1h->set_par_threads(0);
1993   g1h->check_gc_time_stamps();
1994 
1995   if (!cleanup_list_is_empty()) {
1996     // The cleanup list is not empty, so we'll have to process it
1997     // concurrently. Notify anyone else that might be wanting free
1998     // regions that there will be more free regions coming soon.
1999     g1h->set_free_regions_coming();
2000   }
2001 
2002   // call below, since it affects the metric by which we sort the heap
2003   // regions.
2004   if (G1ScrubRemSets) {
2005     double rs_scrub_start = os::elapsedTime();
2006     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
2007     g1h->set_par_threads((int)n_workers);
2008     g1h->workers()->run_task(&g1_par_scrub_rs_task);
2009     g1h->set_par_threads(0);
2010 
2011     double rs_scrub_end = os::elapsedTime();
2012     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2013     _total_rs_scrub_time += this_rs_scrub_time;
2014   }
2015 
2016   // this will also free any regions totally full of garbage objects,
2017   // and sort the regions.
2018   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2019 
2020   // Statistics.
2021   double end = os::elapsedTime();
2022   _cleanup_times.add((end - start) * 1000.0);
2023 
2024   if (G1Log::fine()) {
2025     g1h->g1_policy()->print_heap_transition(start_used_bytes);
2026   }
2027 
2028   // Clean up will have freed any regions completely full of garbage.
2029   // Update the soft reference policy with the new heap occupancy.
2030   Universe::update_heap_info_at_gc();
2031 
2032   if (VerifyDuringGC) {
2033     HandleMark hm;  // handle scope
2034     g1h->prepare_for_verify();
2035     Universe::verify(VerifyOption_G1UsePrevMarking,
2036                      " VerifyDuringGC:(after)");
2037   }
2038 
2039   g1h->check_bitmaps("Cleanup End");
2040 
2041   g1h->verify_region_sets_optional();
2042 
2043   // We need to make this be a "collection" so any collection pause that
2044   // races with it goes around and waits for completeCleanup to finish.
2045   g1h->increment_total_collections();
2046 
2047   // Clean out dead classes and update Metaspace sizes.
2048   if (ClassUnloadingWithConcurrentMark) {
2049     ClassLoaderDataGraph::purge();
2050   }
2051   MetaspaceGC::compute_new_size();
2052 
2053   // We reclaimed old regions so we should calculate the sizes to make
2054   // sure we update the old gen/space data.
2055   g1h->g1mm()->update_sizes();
2056   g1h->allocation_context_stats().update_after_mark();
2057 
2058   g1h->trace_heap_after_concurrent_cycle();
2059 }
2060 
2061 void ConcurrentMark::completeCleanup() {
2062   if (has_aborted()) return;
2063 
2064   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2065 
2066   _cleanup_list.verify_optional();
2067   FreeRegionList tmp_free_list("Tmp Free List");
2068 
2069   if (G1ConcRegionFreeingVerbose) {
2070     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2071                            "cleanup list has %u entries",
2072                            _cleanup_list.length());
2073   }
2074 
2075   // No one else should be accessing the _cleanup_list at this point,
2076   // so it is not necessary to take any locks
2077   while (!_cleanup_list.is_empty()) {
2078     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2079     assert(hr != NULL, "Got NULL from a non-empty list");
2080     hr->par_clear();
2081     tmp_free_list.add_ordered(hr);
2082 
2083     // Instead of adding one region at a time to the secondary_free_list,
2084     // we accumulate them in the local list and move them a few at a
2085     // time. This also cuts down on the number of notify_all() calls
2086     // we do during this process. We'll also append the local list when
2087     // _cleanup_list is empty (which means we just removed the last
2088     // region from the _cleanup_list).
2089     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2090         _cleanup_list.is_empty()) {
2091       if (G1ConcRegionFreeingVerbose) {
2092         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2093                                "appending %u entries to the secondary_free_list, "
2094                                "cleanup list still has %u entries",
2095                                tmp_free_list.length(),
2096                                _cleanup_list.length());
2097       }
2098 
2099       {
2100         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2101         g1h->secondary_free_list_add(&tmp_free_list);
2102         SecondaryFreeList_lock->notify_all();
2103       }
2104 #ifndef PRODUCT
2105       if (G1StressConcRegionFreeing) {
2106         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2107           os::sleep(Thread::current(), (jlong) 1, false);
2108         }
2109       }
2110 #endif
2111     }
2112   }
2113   assert(tmp_free_list.is_empty(), "post-condition");
2114 }
2115 
2116 // Supporting Object and Oop closures for reference discovery
2117 // and processing in during marking
2118 
2119 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2120   HeapWord* addr = (HeapWord*)obj;
2121   return addr != NULL &&
2122          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2123 }
2124 
2125 // 'Keep Alive' oop closure used by both serial parallel reference processing.
2126 // Uses the CMTask associated with a worker thread (for serial reference
2127 // processing the CMTask for worker 0 is used) to preserve (mark) and
2128 // trace referent objects.
2129 //
2130 // Using the CMTask and embedded local queues avoids having the worker
2131 // threads operating on the global mark stack. This reduces the risk
2132 // of overflowing the stack - which we would rather avoid at this late
2133 // state. Also using the tasks' local queues removes the potential
2134 // of the workers interfering with each other that could occur if
2135 // operating on the global stack.
2136 
2137 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2138   ConcurrentMark* _cm;
2139   CMTask*         _task;
2140   int             _ref_counter_limit;
2141   int             _ref_counter;
2142   bool            _is_serial;
2143  public:
2144   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2145     _cm(cm), _task(task), _is_serial(is_serial),
2146     _ref_counter_limit(G1RefProcDrainInterval) {
2147     assert(_ref_counter_limit > 0, "sanity");
2148     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2149     _ref_counter = _ref_counter_limit;
2150   }
2151 
2152   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2153   virtual void do_oop(      oop* p) { do_oop_work(p); }
2154 
2155   template <class T> void do_oop_work(T* p) {
2156     if (!_cm->has_overflown()) {
2157       oop obj = oopDesc::load_decode_heap_oop(p);
2158       if (_cm->verbose_high()) {
2159         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2160                                "*"PTR_FORMAT" = "PTR_FORMAT,
2161                                _task->worker_id(), p2i(p), p2i((void*) obj));
2162       }
2163 
2164       _task->deal_with_reference(obj);
2165       _ref_counter--;
2166 
2167       if (_ref_counter == 0) {
2168         // We have dealt with _ref_counter_limit references, pushing them
2169         // and objects reachable from them on to the local stack (and
2170         // possibly the global stack). Call CMTask::do_marking_step() to
2171         // process these entries.
2172         //
2173         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2174         // there's nothing more to do (i.e. we're done with the entries that
2175         // were pushed as a result of the CMTask::deal_with_reference() calls
2176         // above) or we overflow.
2177         //
2178         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2179         // flag while there may still be some work to do. (See the comment at
2180         // the beginning of CMTask::do_marking_step() for those conditions -
2181         // one of which is reaching the specified time target.) It is only
2182         // when CMTask::do_marking_step() returns without setting the
2183         // has_aborted() flag that the marking step has completed.
2184         do {
2185           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2186           _task->do_marking_step(mark_step_duration_ms,
2187                                  false      /* do_termination */,
2188                                  _is_serial);
2189         } while (_task->has_aborted() && !_cm->has_overflown());
2190         _ref_counter = _ref_counter_limit;
2191       }
2192     } else {
2193       if (_cm->verbose_high()) {
2194          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2195       }
2196     }
2197   }
2198 };
2199 
2200 // 'Drain' oop closure used by both serial and parallel reference processing.
2201 // Uses the CMTask associated with a given worker thread (for serial
2202 // reference processing the CMtask for worker 0 is used). Calls the
2203 // do_marking_step routine, with an unbelievably large timeout value,
2204 // to drain the marking data structures of the remaining entries
2205 // added by the 'keep alive' oop closure above.
2206 
2207 class G1CMDrainMarkingStackClosure: public VoidClosure {
2208   ConcurrentMark* _cm;
2209   CMTask*         _task;
2210   bool            _is_serial;
2211  public:
2212   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2213     _cm(cm), _task(task), _is_serial(is_serial) {
2214     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2215   }
2216 
2217   void do_void() {
2218     do {
2219       if (_cm->verbose_high()) {
2220         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2221                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2222       }
2223 
2224       // We call CMTask::do_marking_step() to completely drain the local
2225       // and global marking stacks of entries pushed by the 'keep alive'
2226       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2227       //
2228       // CMTask::do_marking_step() is called in a loop, which we'll exit
2229       // if there's nothing more to do (i.e. we've completely drained the
2230       // entries that were pushed as a a result of applying the 'keep alive'
2231       // closure to the entries on the discovered ref lists) or we overflow
2232       // the global marking stack.
2233       //
2234       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2235       // flag while there may still be some work to do. (See the comment at
2236       // the beginning of CMTask::do_marking_step() for those conditions -
2237       // one of which is reaching the specified time target.) It is only
2238       // when CMTask::do_marking_step() returns without setting the
2239       // has_aborted() flag that the marking step has completed.
2240 
2241       _task->do_marking_step(1000000000.0 /* something very large */,
2242                              true         /* do_termination */,
2243                              _is_serial);
2244     } while (_task->has_aborted() && !_cm->has_overflown());
2245   }
2246 };
2247 
2248 // Implementation of AbstractRefProcTaskExecutor for parallel
2249 // reference processing at the end of G1 concurrent marking
2250 
2251 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2252 private:
2253   G1CollectedHeap* _g1h;
2254   ConcurrentMark*  _cm;
2255   WorkGang*        _workers;
2256   uint             _active_workers;
2257 
2258 public:
2259   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2260                           ConcurrentMark* cm,
2261                           WorkGang* workers,
2262                           uint n_workers) :
2263     _g1h(g1h), _cm(cm),
2264     _workers(workers), _active_workers(n_workers) { }
2265 
2266   // Executes the given task using concurrent marking worker threads.
2267   virtual void execute(ProcessTask& task);
2268   virtual void execute(EnqueueTask& task);
2269 };
2270 
2271 class G1CMRefProcTaskProxy: public AbstractGangTask {
2272   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2273   ProcessTask&     _proc_task;
2274   G1CollectedHeap* _g1h;
2275   ConcurrentMark*  _cm;
2276 
2277 public:
2278   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2279                      G1CollectedHeap* g1h,
2280                      ConcurrentMark* cm) :
2281     AbstractGangTask("Process reference objects in parallel"),
2282     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2283     ReferenceProcessor* rp = _g1h->ref_processor_cm();
2284     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2285   }
2286 
2287   virtual void work(uint worker_id) {
2288     ResourceMark rm;
2289     HandleMark hm;
2290     CMTask* task = _cm->task(worker_id);
2291     G1CMIsAliveClosure g1_is_alive(_g1h);
2292     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2293     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2294 
2295     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2296   }
2297 };
2298 
2299 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2300   assert(_workers != NULL, "Need parallel worker threads.");
2301   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2302 
2303   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2304 
2305   // We need to reset the concurrency level before each
2306   // proxy task execution, so that the termination protocol
2307   // and overflow handling in CMTask::do_marking_step() knows
2308   // how many workers to wait for.
2309   _cm->set_concurrency(_active_workers);
2310   _g1h->set_par_threads(_active_workers);
2311   _workers->run_task(&proc_task_proxy);
2312   _g1h->set_par_threads(0);
2313 }
2314 
2315 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2316   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2317   EnqueueTask& _enq_task;
2318 
2319 public:
2320   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2321     AbstractGangTask("Enqueue reference objects in parallel"),
2322     _enq_task(enq_task) { }
2323 
2324   virtual void work(uint worker_id) {
2325     _enq_task.work(worker_id);
2326   }
2327 };
2328 
2329 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2330   assert(_workers != NULL, "Need parallel worker threads.");
2331   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2332 
2333   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2334 
2335   // Not strictly necessary but...
2336   //
2337   // We need to reset the concurrency level before each
2338   // proxy task execution, so that the termination protocol
2339   // and overflow handling in CMTask::do_marking_step() knows
2340   // how many workers to wait for.
2341   _cm->set_concurrency(_active_workers);
2342   _g1h->set_par_threads(_active_workers);
2343   _workers->run_task(&enq_task_proxy);
2344   _g1h->set_par_threads(0);
2345 }
2346 
2347 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2348   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2349 }
2350 
2351 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2352   if (has_overflown()) {
2353     // Skip processing the discovered references if we have
2354     // overflown the global marking stack. Reference objects
2355     // only get discovered once so it is OK to not
2356     // de-populate the discovered reference lists. We could have,
2357     // but the only benefit would be that, when marking restarts,
2358     // less reference objects are discovered.
2359     return;
2360   }
2361 
2362   ResourceMark rm;
2363   HandleMark   hm;
2364 
2365   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2366 
2367   // Is alive closure.
2368   G1CMIsAliveClosure g1_is_alive(g1h);
2369 
2370   // Inner scope to exclude the cleaning of the string and symbol
2371   // tables from the displayed time.
2372   {
2373     G1CMTraceTime t("GC ref-proc", G1Log::finer());
2374 
2375     ReferenceProcessor* rp = g1h->ref_processor_cm();
2376 
2377     // See the comment in G1CollectedHeap::ref_processing_init()
2378     // about how reference processing currently works in G1.
2379 
2380     // Set the soft reference policy
2381     rp->setup_policy(clear_all_soft_refs);
2382     assert(_markStack.isEmpty(), "mark stack should be empty");
2383 
2384     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2385     // in serial reference processing. Note these closures are also
2386     // used for serially processing (by the the current thread) the
2387     // JNI references during parallel reference processing.
2388     //
2389     // These closures do not need to synchronize with the worker
2390     // threads involved in parallel reference processing as these
2391     // instances are executed serially by the current thread (e.g.
2392     // reference processing is not multi-threaded and is thus
2393     // performed by the current thread instead of a gang worker).
2394     //
2395     // The gang tasks involved in parallel reference processing create
2396     // their own instances of these closures, which do their own
2397     // synchronization among themselves.
2398     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2399     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2400 
2401     // We need at least one active thread. If reference processing
2402     // is not multi-threaded we use the current (VMThread) thread,
2403     // otherwise we use the work gang from the G1CollectedHeap and
2404     // we utilize all the worker threads we can.
2405     bool processing_is_mt = rp->processing_is_mt();
2406     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2407     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2408 
2409     // Parallel processing task executor.
2410     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2411                                               g1h->workers(), active_workers);
2412     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2413 
2414     // Set the concurrency level. The phase was already set prior to
2415     // executing the remark task.
2416     set_concurrency(active_workers);
2417 
2418     // Set the degree of MT processing here.  If the discovery was done MT,
2419     // the number of threads involved during discovery could differ from
2420     // the number of active workers.  This is OK as long as the discovered
2421     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2422     rp->set_active_mt_degree(active_workers);
2423 
2424     // Process the weak references.
2425     const ReferenceProcessorStats& stats =
2426         rp->process_discovered_references(&g1_is_alive,
2427                                           &g1_keep_alive,
2428                                           &g1_drain_mark_stack,
2429                                           executor,
2430                                           g1h->gc_timer_cm(),
2431                                           concurrent_gc_id());
2432     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2433 
2434     // The do_oop work routines of the keep_alive and drain_marking_stack
2435     // oop closures will set the has_overflown flag if we overflow the
2436     // global marking stack.
2437 
2438     assert(_markStack.overflow() || _markStack.isEmpty(),
2439             "mark stack should be empty (unless it overflowed)");
2440 
2441     if (_markStack.overflow()) {
2442       // This should have been done already when we tried to push an
2443       // entry on to the global mark stack. But let's do it again.
2444       set_has_overflown();
2445     }
2446 
2447     assert(rp->num_q() == active_workers, "why not");
2448 
2449     rp->enqueue_discovered_references(executor);
2450 
2451     rp->verify_no_references_recorded();
2452     assert(!rp->discovery_enabled(), "Post condition");
2453   }
2454 
2455   if (has_overflown()) {
2456     // We can not trust g1_is_alive if the marking stack overflowed
2457     return;
2458   }
2459 
2460   assert(_markStack.isEmpty(), "Marking should have completed");
2461 
2462   // Unload Klasses, String, Symbols, Code Cache, etc.
2463   {
2464     G1CMTraceTime trace("Unloading", G1Log::finer());
2465 
2466     if (ClassUnloadingWithConcurrentMark) {
2467       bool purged_classes;
2468 
2469       {
2470         G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
2471         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2472       }
2473 
2474       {
2475         G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
2476         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2477       }
2478     }
2479 
2480     if (G1StringDedup::is_enabled()) {
2481       G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
2482       G1StringDedup::unlink(&g1_is_alive);
2483     }
2484   }
2485 }
2486 
2487 void ConcurrentMark::swapMarkBitMaps() {
2488   CMBitMapRO* temp = _prevMarkBitMap;
2489   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2490   _nextMarkBitMap  = (CMBitMap*)  temp;
2491 }
2492 
2493 // Closure for marking entries in SATB buffers.
2494 class CMSATBBufferClosure : public SATBBufferClosure {
2495 private:
2496   CMTask* _task;
2497   G1CollectedHeap* _g1h;
2498 
2499   // This is very similar to CMTask::deal_with_reference, but with
2500   // more relaxed requirements for the argument, so this must be more
2501   // circumspect about treating the argument as an object.
2502   void do_entry(void* entry) const {
2503     _task->increment_refs_reached();
2504     HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
2505     if (entry < hr->next_top_at_mark_start()) {
2506       // Until we get here, we don't know whether entry refers to a valid
2507       // object; it could instead have been a stale reference.
2508       oop obj = static_cast<oop>(entry);
2509       assert(obj->is_oop(true /* ignore mark word */),
2510              err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
2511       _task->make_reference_grey(obj, hr);
2512     }
2513   }
2514 
2515 public:
2516   CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
2517     : _task(task), _g1h(g1h) { }
2518 
2519   virtual void do_buffer(void** buffer, size_t size) {
2520     for (size_t i = 0; i < size; ++i) {
2521       do_entry(buffer[i]);
2522     }
2523   }
2524 };
2525 
2526 class G1RemarkThreadsClosure : public ThreadClosure {
2527   CMSATBBufferClosure _cm_satb_cl;
2528   G1CMOopClosure _cm_cl;
2529   MarkingCodeBlobClosure _code_cl;
2530   int _thread_parity;
2531 
2532  public:
2533   G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
2534     _cm_satb_cl(task, g1h),
2535     _cm_cl(g1h, g1h->concurrent_mark(), task),
2536     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2537     _thread_parity(Threads::thread_claim_parity()) {}
2538 
2539   void do_thread(Thread* thread) {
2540     if (thread->is_Java_thread()) {
2541       if (thread->claim_oops_do(true, _thread_parity)) {
2542         JavaThread* jt = (JavaThread*)thread;
2543 
2544         // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking
2545         // however the liveness of oops reachable from nmethods have very complex lifecycles:
2546         // * Alive if on the stack of an executing method
2547         // * Weakly reachable otherwise
2548         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
2549         // live by the SATB invariant but other oops recorded in nmethods may behave differently.
2550         jt->nmethods_do(&_code_cl);
2551 
2552         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
2553       }
2554     } else if (thread->is_VM_thread()) {
2555       if (thread->claim_oops_do(true, _thread_parity)) {
2556         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
2557       }
2558     }
2559   }
2560 };
2561 
2562 class CMRemarkTask: public AbstractGangTask {
2563 private:
2564   ConcurrentMark* _cm;
2565 public:
2566   void work(uint worker_id) {
2567     // Since all available tasks are actually started, we should
2568     // only proceed if we're supposed to be active.
2569     if (worker_id < _cm->active_tasks()) {
2570       CMTask* task = _cm->task(worker_id);
2571       task->record_start_time();
2572       {
2573         ResourceMark rm;
2574         HandleMark hm;
2575 
2576         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2577         Threads::threads_do(&threads_f);
2578       }
2579 
2580       do {
2581         task->do_marking_step(1000000000.0 /* something very large */,
2582                               true         /* do_termination       */,
2583                               false        /* is_serial            */);
2584       } while (task->has_aborted() && !_cm->has_overflown());
2585       // If we overflow, then we do not want to restart. We instead
2586       // want to abort remark and do concurrent marking again.
2587       task->record_end_time();
2588     }
2589   }
2590 
2591   CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
2592     AbstractGangTask("Par Remark"), _cm(cm) {
2593     _cm->terminator()->reset_for_reuse(active_workers);
2594   }
2595 };
2596 
2597 void ConcurrentMark::checkpointRootsFinalWork() {
2598   ResourceMark rm;
2599   HandleMark   hm;
2600   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2601 
2602   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2603 
2604   g1h->ensure_parsability(false);
2605 
2606   // this is remark, so we'll use up all active threads
2607   uint active_workers = g1h->workers()->active_workers();
2608   if (active_workers == 0) {
2609     assert(active_workers > 0, "Should have been set earlier");
2610     active_workers = (uint) ParallelGCThreads;
2611     g1h->workers()->set_active_workers(active_workers);
2612   }
2613   set_concurrency_and_phase(active_workers, false /* concurrent */);
2614   // Leave _parallel_marking_threads at it's
2615   // value originally calculated in the ConcurrentMark
2616   // constructor and pass values of the active workers
2617   // through the gang in the task.
2618 
2619   {
2620     StrongRootsScope srs(active_workers);
2621 
2622     CMRemarkTask remarkTask(this, active_workers);
2623     // We will start all available threads, even if we decide that the
2624     // active_workers will be fewer. The extra ones will just bail out
2625     // immediately.
2626     g1h->set_par_threads(active_workers);
2627     g1h->workers()->run_task(&remarkTask);
2628     g1h->set_par_threads(0);
2629   }
2630 
2631   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2632   guarantee(has_overflown() ||
2633             satb_mq_set.completed_buffers_num() == 0,
2634             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2635                     BOOL_TO_STR(has_overflown()),
2636                     satb_mq_set.completed_buffers_num()));
2637 
2638   print_stats();
2639 }
2640 
2641 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2642   // Note we are overriding the read-only view of the prev map here, via
2643   // the cast.
2644   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2645 }
2646 
2647 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2648   _nextMarkBitMap->clearRange(mr);
2649 }
2650 
2651 HeapRegion*
2652 ConcurrentMark::claim_region(uint worker_id) {
2653   // "checkpoint" the finger
2654   HeapWord* finger = _finger;
2655 
2656   // _heap_end will not change underneath our feet; it only changes at
2657   // yield points.
2658   while (finger < _heap_end) {
2659     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2660 
2661     // Note on how this code handles humongous regions. In the
2662     // normal case the finger will reach the start of a "starts
2663     // humongous" (SH) region. Its end will either be the end of the
2664     // last "continues humongous" (CH) region in the sequence, or the
2665     // standard end of the SH region (if the SH is the only region in
2666     // the sequence). That way claim_region() will skip over the CH
2667     // regions. However, there is a subtle race between a CM thread
2668     // executing this method and a mutator thread doing a humongous
2669     // object allocation. The two are not mutually exclusive as the CM
2670     // thread does not need to hold the Heap_lock when it gets
2671     // here. So there is a chance that claim_region() will come across
2672     // a free region that's in the progress of becoming a SH or a CH
2673     // region. In the former case, it will either
2674     //   a) Miss the update to the region's end, in which case it will
2675     //      visit every subsequent CH region, will find their bitmaps
2676     //      empty, and do nothing, or
2677     //   b) Will observe the update of the region's end (in which case
2678     //      it will skip the subsequent CH regions).
2679     // If it comes across a region that suddenly becomes CH, the
2680     // scenario will be similar to b). So, the race between
2681     // claim_region() and a humongous object allocation might force us
2682     // to do a bit of unnecessary work (due to some unnecessary bitmap
2683     // iterations) but it should not introduce and correctness issues.
2684     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2685 
2686     // Above heap_region_containing_raw may return NULL as we always scan claim
2687     // until the end of the heap. In this case, just jump to the next region.
2688     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2689 
2690     // Is the gap between reading the finger and doing the CAS too long?
2691     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2692     if (res == finger && curr_region != NULL) {
2693       // we succeeded
2694       HeapWord*   bottom        = curr_region->bottom();
2695       HeapWord*   limit         = curr_region->next_top_at_mark_start();
2696 
2697       if (verbose_low()) {
2698         gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2699                                "["PTR_FORMAT", "PTR_FORMAT"), "
2700                                "limit = "PTR_FORMAT,
2701                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2702       }
2703 
2704       // notice that _finger == end cannot be guaranteed here since,
2705       // someone else might have moved the finger even further
2706       assert(_finger >= end, "the finger should have moved forward");
2707 
2708       if (verbose_low()) {
2709         gclog_or_tty->print_cr("[%u] we were successful with region = "
2710                                PTR_FORMAT, worker_id, p2i(curr_region));
2711       }
2712 
2713       if (limit > bottom) {
2714         if (verbose_low()) {
2715           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2716                                  "returning it ", worker_id, p2i(curr_region));
2717         }
2718         return curr_region;
2719       } else {
2720         assert(limit == bottom,
2721                "the region limit should be at bottom");
2722         if (verbose_low()) {
2723           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2724                                  "returning NULL", worker_id, p2i(curr_region));
2725         }
2726         // we return NULL and the caller should try calling
2727         // claim_region() again.
2728         return NULL;
2729       }
2730     } else {
2731       assert(_finger > finger, "the finger should have moved forward");
2732       if (verbose_low()) {
2733         if (curr_region == NULL) {
2734           gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
2735                                  "global finger = "PTR_FORMAT", "
2736                                  "our finger = "PTR_FORMAT,
2737                                  worker_id, p2i(_finger), p2i(finger));
2738         } else {
2739           gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2740                                  "global finger = "PTR_FORMAT", "
2741                                  "our finger = "PTR_FORMAT,
2742                                  worker_id, p2i(_finger), p2i(finger));
2743         }
2744       }
2745 
2746       // read it again
2747       finger = _finger;
2748     }
2749   }
2750 
2751   return NULL;
2752 }
2753 
2754 #ifndef PRODUCT
2755 enum VerifyNoCSetOopsPhase {
2756   VerifyNoCSetOopsStack,
2757   VerifyNoCSetOopsQueues
2758 };
2759 
2760 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
2761 private:
2762   G1CollectedHeap* _g1h;
2763   VerifyNoCSetOopsPhase _phase;
2764   int _info;
2765 
2766   const char* phase_str() {
2767     switch (_phase) {
2768     case VerifyNoCSetOopsStack:         return "Stack";
2769     case VerifyNoCSetOopsQueues:        return "Queue";
2770     default:                            ShouldNotReachHere();
2771     }
2772     return NULL;
2773   }
2774 
2775   void do_object_work(oop obj) {
2776     guarantee(!_g1h->obj_in_cs(obj),
2777               err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
2778                       p2i((void*) obj), phase_str(), _info));
2779   }
2780 
2781 public:
2782   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2783 
2784   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
2785     _phase = phase;
2786     _info = info;
2787   }
2788 
2789   virtual void do_oop(oop* p) {
2790     oop obj = oopDesc::load_decode_heap_oop(p);
2791     do_object_work(obj);
2792   }
2793 
2794   virtual void do_oop(narrowOop* p) {
2795     // We should not come across narrow oops while scanning marking
2796     // stacks
2797     ShouldNotReachHere();
2798   }
2799 
2800   virtual void do_object(oop obj) {
2801     do_object_work(obj);
2802   }
2803 };
2804 
2805 void ConcurrentMark::verify_no_cset_oops() {
2806   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2807   if (!G1CollectedHeap::heap()->mark_in_progress()) {
2808     return;
2809   }
2810 
2811   VerifyNoCSetOopsClosure cl;
2812 
2813   // Verify entries on the global mark stack
2814   cl.set_phase(VerifyNoCSetOopsStack);
2815   _markStack.oops_do(&cl);
2816 
2817   // Verify entries on the task queues
2818   for (uint i = 0; i < _max_worker_id; i += 1) {
2819     cl.set_phase(VerifyNoCSetOopsQueues, i);
2820     CMTaskQueue* queue = _task_queues->queue(i);
2821     queue->oops_do(&cl);
2822   }
2823 
2824   // Verify the global finger
2825   HeapWord* global_finger = finger();
2826   if (global_finger != NULL && global_finger < _heap_end) {
2827     // The global finger always points to a heap region boundary. We
2828     // use heap_region_containing_raw() to get the containing region
2829     // given that the global finger could be pointing to a free region
2830     // which subsequently becomes continues humongous. If that
2831     // happens, heap_region_containing() will return the bottom of the
2832     // corresponding starts humongous region and the check below will
2833     // not hold any more.
2834     // Since we always iterate over all regions, we might get a NULL HeapRegion
2835     // here.
2836     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2837     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2838               err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
2839                       p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
2840   }
2841 
2842   // Verify the task fingers
2843   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2844   for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
2845     CMTask* task = _tasks[i];
2846     HeapWord* task_finger = task->finger();
2847     if (task_finger != NULL && task_finger < _heap_end) {
2848       // See above note on the global finger verification.
2849       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2850       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2851                 !task_hr->in_collection_set(),
2852                 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
2853                         p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
2854     }
2855   }
2856 }
2857 #endif // PRODUCT
2858 
2859 // Aggregate the counting data that was constructed concurrently
2860 // with marking.
2861 class AggregateCountDataHRClosure: public HeapRegionClosure {
2862   G1CollectedHeap* _g1h;
2863   ConcurrentMark* _cm;
2864   CardTableModRefBS* _ct_bs;
2865   BitMap* _cm_card_bm;
2866   uint _max_worker_id;
2867 
2868  public:
2869   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2870                               BitMap* cm_card_bm,
2871                               uint max_worker_id) :
2872     _g1h(g1h), _cm(g1h->concurrent_mark()),
2873     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
2874     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2875 
2876   bool doHeapRegion(HeapRegion* hr) {
2877     if (hr->is_continues_humongous()) {
2878       // We will ignore these here and process them when their
2879       // associated "starts humongous" region is processed.
2880       // Note that we cannot rely on their associated
2881       // "starts humongous" region to have their bit set to 1
2882       // since, due to the region chunking in the parallel region
2883       // iteration, a "continues humongous" region might be visited
2884       // before its associated "starts humongous".
2885       return false;
2886     }
2887 
2888     HeapWord* start = hr->bottom();
2889     HeapWord* limit = hr->next_top_at_mark_start();
2890     HeapWord* end = hr->end();
2891 
2892     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2893            err_msg("Preconditions not met - "
2894                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
2895                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
2896                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
2897 
2898     assert(hr->next_marked_bytes() == 0, "Precondition");
2899 
2900     if (start == limit) {
2901       // NTAMS of this region has not been set so nothing to do.
2902       return false;
2903     }
2904 
2905     // 'start' should be in the heap.
2906     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2907     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2908     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2909 
2910     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2911     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2912     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2913 
2914     // If ntams is not card aligned then we bump card bitmap index
2915     // for limit so that we get the all the cards spanned by
2916     // the object ending at ntams.
2917     // Note: if this is the last region in the heap then ntams
2918     // could be actually just beyond the end of the the heap;
2919     // limit_idx will then  correspond to a (non-existent) card
2920     // that is also outside the heap.
2921     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
2922       limit_idx += 1;
2923     }
2924 
2925     assert(limit_idx <= end_idx, "or else use atomics");
2926 
2927     // Aggregate the "stripe" in the count data associated with hr.
2928     uint hrm_index = hr->hrm_index();
2929     size_t marked_bytes = 0;
2930 
2931     for (uint i = 0; i < _max_worker_id; i += 1) {
2932       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
2933       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
2934 
2935       // Fetch the marked_bytes in this region for task i and
2936       // add it to the running total for this region.
2937       marked_bytes += marked_bytes_array[hrm_index];
2938 
2939       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
2940       // into the global card bitmap.
2941       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
2942 
2943       while (scan_idx < limit_idx) {
2944         assert(task_card_bm->at(scan_idx) == true, "should be");
2945         _cm_card_bm->set_bit(scan_idx);
2946         assert(_cm_card_bm->at(scan_idx) == true, "should be");
2947 
2948         // BitMap::get_next_one_offset() can handle the case when
2949         // its left_offset parameter is greater than its right_offset
2950         // parameter. It does, however, have an early exit if
2951         // left_offset == right_offset. So let's limit the value
2952         // passed in for left offset here.
2953         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
2954         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
2955       }
2956     }
2957 
2958     // Update the marked bytes for this region.
2959     hr->add_to_marked_bytes(marked_bytes);
2960 
2961     // Next heap region
2962     return false;
2963   }
2964 };
2965 
2966 class G1AggregateCountDataTask: public AbstractGangTask {
2967 protected:
2968   G1CollectedHeap* _g1h;
2969   ConcurrentMark* _cm;
2970   BitMap* _cm_card_bm;
2971   uint _max_worker_id;
2972   uint _active_workers;
2973   HeapRegionClaimer _hrclaimer;
2974 
2975 public:
2976   G1AggregateCountDataTask(G1CollectedHeap* g1h,
2977                            ConcurrentMark* cm,
2978                            BitMap* cm_card_bm,
2979                            uint max_worker_id,
2980                            uint n_workers) :
2981       AbstractGangTask("Count Aggregation"),
2982       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
2983       _max_worker_id(max_worker_id),
2984       _active_workers(n_workers),
2985       _hrclaimer(_active_workers) {
2986   }
2987 
2988   void work(uint worker_id) {
2989     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
2990 
2991     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
2992   }
2993 };
2994 
2995 
2996 void ConcurrentMark::aggregate_count_data() {
2997   uint n_workers = _g1h->workers()->active_workers();
2998 
2999   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3000                                            _max_worker_id, n_workers);
3001 
3002   _g1h->set_par_threads(n_workers);
3003   _g1h->workers()->run_task(&g1_par_agg_task);
3004   _g1h->set_par_threads(0);
3005 }
3006 
3007 // Clear the per-worker arrays used to store the per-region counting data
3008 void ConcurrentMark::clear_all_count_data() {
3009   // Clear the global card bitmap - it will be filled during
3010   // liveness count aggregation (during remark) and the
3011   // final counting task.
3012   _card_bm.clear();
3013 
3014   // Clear the global region bitmap - it will be filled as part
3015   // of the final counting task.
3016   _region_bm.clear();
3017 
3018   uint max_regions = _g1h->max_regions();
3019   assert(_max_worker_id > 0, "uninitialized");
3020 
3021   for (uint i = 0; i < _max_worker_id; i += 1) {
3022     BitMap* task_card_bm = count_card_bitmap_for(i);
3023     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3024 
3025     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3026     assert(marked_bytes_array != NULL, "uninitialized");
3027 
3028     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3029     task_card_bm->clear();
3030   }
3031 }
3032 
3033 void ConcurrentMark::print_stats() {
3034   if (verbose_stats()) {
3035     gclog_or_tty->print_cr("---------------------------------------------------------------------");
3036     for (size_t i = 0; i < _active_tasks; ++i) {
3037       _tasks[i]->print_stats();
3038       gclog_or_tty->print_cr("---------------------------------------------------------------------");
3039     }
3040   }
3041 }
3042 
3043 // abandon current marking iteration due to a Full GC
3044 void ConcurrentMark::abort() {
3045   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
3046   // concurrent bitmap clearing.
3047   _nextMarkBitMap->clearAll();
3048 
3049   // Note we cannot clear the previous marking bitmap here
3050   // since VerifyDuringGC verifies the objects marked during
3051   // a full GC against the previous bitmap.
3052 
3053   // Clear the liveness counting data
3054   clear_all_count_data();
3055   // Empty mark stack
3056   reset_marking_state();
3057   for (uint i = 0; i < _max_worker_id; ++i) {
3058     _tasks[i]->clear_region_fields();
3059   }
3060   _first_overflow_barrier_sync.abort();
3061   _second_overflow_barrier_sync.abort();
3062   const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
3063   if (!gc_id.is_undefined()) {
3064     // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
3065     // to detect that it was aborted. Only keep track of the first GC id that we aborted.
3066     _aborted_gc_id = gc_id;
3067    }
3068   _has_aborted = true;
3069 
3070   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3071   satb_mq_set.abandon_partial_marking();
3072   // This can be called either during or outside marking, we'll read
3073   // the expected_active value from the SATB queue set.
3074   satb_mq_set.set_active_all_threads(
3075                                  false, /* new active value */
3076                                  satb_mq_set.is_active() /* expected_active */);
3077 
3078   _g1h->trace_heap_after_concurrent_cycle();
3079   _g1h->register_concurrent_cycle_end();
3080 }
3081 
3082 const GCId& ConcurrentMark::concurrent_gc_id() {
3083   if (has_aborted()) {
3084     return _aborted_gc_id;
3085   }
3086   return _g1h->gc_tracer_cm()->gc_id();
3087 }
3088 
3089 static void print_ms_time_info(const char* prefix, const char* name,
3090                                NumberSeq& ns) {
3091   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3092                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3093   if (ns.num() > 0) {
3094     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3095                            prefix, ns.sd(), ns.maximum());
3096   }
3097 }
3098 
3099 void ConcurrentMark::print_summary_info() {
3100   gclog_or_tty->print_cr(" Concurrent marking:");
3101   print_ms_time_info("  ", "init marks", _init_times);
3102   print_ms_time_info("  ", "remarks", _remark_times);
3103   {
3104     print_ms_time_info("     ", "final marks", _remark_mark_times);
3105     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3106 
3107   }
3108   print_ms_time_info("  ", "cleanups", _cleanup_times);
3109   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
3110                          _total_counting_time,
3111                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3112                           (double)_cleanup_times.num()
3113                          : 0.0));
3114   if (G1ScrubRemSets) {
3115     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3116                            _total_rs_scrub_time,
3117                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3118                             (double)_cleanup_times.num()
3119                            : 0.0));
3120   }
3121   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3122                          (_init_times.sum() + _remark_times.sum() +
3123                           _cleanup_times.sum())/1000.0);
3124   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3125                 "(%8.2f s marking).",
3126                 cmThread()->vtime_accum(),
3127                 cmThread()->vtime_mark_accum());
3128 }
3129 
3130 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3131   _parallel_workers->print_worker_threads_on(st);
3132 }
3133 
3134 void ConcurrentMark::print_on_error(outputStream* st) const {
3135   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3136       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3137   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3138   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3139 }
3140 
3141 // We take a break if someone is trying to stop the world.
3142 bool ConcurrentMark::do_yield_check(uint worker_id) {
3143   if (SuspendibleThreadSet::should_yield()) {
3144     if (worker_id == 0) {
3145       _g1h->g1_policy()->record_concurrent_pause();
3146     }
3147     SuspendibleThreadSet::yield();
3148     return true;
3149   } else {
3150     return false;
3151   }
3152 }
3153 
3154 #ifndef PRODUCT
3155 // for debugging purposes
3156 void ConcurrentMark::print_finger() {
3157   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3158                          p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3159   for (uint i = 0; i < _max_worker_id; ++i) {
3160     gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3161   }
3162   gclog_or_tty->cr();
3163 }
3164 #endif
3165 
3166 template<bool scan>
3167 inline void CMTask::process_grey_object(oop obj) {
3168   assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
3169   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3170 
3171   if (_cm->verbose_high()) {
3172     gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
3173                            _worker_id, p2i((void*) obj));
3174   }
3175 
3176   size_t obj_size = obj->size();
3177   _words_scanned += obj_size;
3178 
3179   if (scan) {
3180     obj->oop_iterate(_cm_oop_closure);
3181   }
3182   statsOnly( ++_objs_scanned );
3183   check_limits();
3184 }
3185 
3186 template void CMTask::process_grey_object<true>(oop);
3187 template void CMTask::process_grey_object<false>(oop);
3188 
3189 // Closure for iteration over bitmaps
3190 class CMBitMapClosure : public BitMapClosure {
3191 private:
3192   // the bitmap that is being iterated over
3193   CMBitMap*                   _nextMarkBitMap;
3194   ConcurrentMark*             _cm;
3195   CMTask*                     _task;
3196 
3197 public:
3198   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3199     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
3200 
3201   bool do_bit(size_t offset) {
3202     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3203     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3204     assert( addr < _cm->finger(), "invariant");
3205 
3206     statsOnly( _task->increase_objs_found_on_bitmap() );
3207     assert(addr >= _task->finger(), "invariant");
3208 
3209     // We move that task's local finger along.
3210     _task->move_finger_to(addr);
3211 
3212     _task->scan_object(oop(addr));
3213     // we only partially drain the local queue and global stack
3214     _task->drain_local_queue(true);
3215     _task->drain_global_stack(true);
3216 
3217     // if the has_aborted flag has been raised, we need to bail out of
3218     // the iteration
3219     return !_task->has_aborted();
3220   }
3221 };
3222 
3223 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3224                                ConcurrentMark* cm,
3225                                CMTask* task)
3226   : _g1h(g1h), _cm(cm), _task(task) {
3227   assert(_ref_processor == NULL, "should be initialized to NULL");
3228 
3229   if (G1UseConcMarkReferenceProcessing) {
3230     _ref_processor = g1h->ref_processor_cm();
3231     assert(_ref_processor != NULL, "should not be NULL");
3232   }
3233 }
3234 
3235 void CMTask::setup_for_region(HeapRegion* hr) {
3236   assert(hr != NULL,
3237         "claim_region() should have filtered out NULL regions");
3238   assert(!hr->is_continues_humongous(),
3239         "claim_region() should have filtered out continues humongous regions");
3240 
3241   if (_cm->verbose_low()) {
3242     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3243                            _worker_id, p2i(hr));
3244   }
3245 
3246   _curr_region  = hr;
3247   _finger       = hr->bottom();
3248   update_region_limit();
3249 }
3250 
3251 void CMTask::update_region_limit() {
3252   HeapRegion* hr            = _curr_region;
3253   HeapWord* bottom          = hr->bottom();
3254   HeapWord* limit           = hr->next_top_at_mark_start();
3255 
3256   if (limit == bottom) {
3257     if (_cm->verbose_low()) {
3258       gclog_or_tty->print_cr("[%u] found an empty region "
3259                              "["PTR_FORMAT", "PTR_FORMAT")",
3260                              _worker_id, p2i(bottom), p2i(limit));
3261     }
3262     // The region was collected underneath our feet.
3263     // We set the finger to bottom to ensure that the bitmap
3264     // iteration that will follow this will not do anything.
3265     // (this is not a condition that holds when we set the region up,
3266     // as the region is not supposed to be empty in the first place)
3267     _finger = bottom;
3268   } else if (limit >= _region_limit) {
3269     assert(limit >= _finger, "peace of mind");
3270   } else {
3271     assert(limit < _region_limit, "only way to get here");
3272     // This can happen under some pretty unusual circumstances.  An
3273     // evacuation pause empties the region underneath our feet (NTAMS
3274     // at bottom). We then do some allocation in the region (NTAMS
3275     // stays at bottom), followed by the region being used as a GC
3276     // alloc region (NTAMS will move to top() and the objects
3277     // originally below it will be grayed). All objects now marked in
3278     // the region are explicitly grayed, if below the global finger,
3279     // and we do not need in fact to scan anything else. So, we simply
3280     // set _finger to be limit to ensure that the bitmap iteration
3281     // doesn't do anything.
3282     _finger = limit;
3283   }
3284 
3285   _region_limit = limit;
3286 }
3287 
3288 void CMTask::giveup_current_region() {
3289   assert(_curr_region != NULL, "invariant");
3290   if (_cm->verbose_low()) {
3291     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3292                            _worker_id, p2i(_curr_region));
3293   }
3294   clear_region_fields();
3295 }
3296 
3297 void CMTask::clear_region_fields() {
3298   // Values for these three fields that indicate that we're not
3299   // holding on to a region.
3300   _curr_region   = NULL;
3301   _finger        = NULL;
3302   _region_limit  = NULL;
3303 }
3304 
3305 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3306   if (cm_oop_closure == NULL) {
3307     assert(_cm_oop_closure != NULL, "invariant");
3308   } else {
3309     assert(_cm_oop_closure == NULL, "invariant");
3310   }
3311   _cm_oop_closure = cm_oop_closure;
3312 }
3313 
3314 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3315   guarantee(nextMarkBitMap != NULL, "invariant");
3316 
3317   if (_cm->verbose_low()) {
3318     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3319   }
3320 
3321   _nextMarkBitMap                = nextMarkBitMap;
3322   clear_region_fields();
3323 
3324   _calls                         = 0;
3325   _elapsed_time_ms               = 0.0;
3326   _termination_time_ms           = 0.0;
3327   _termination_start_time_ms     = 0.0;
3328 
3329 #if _MARKING_STATS_
3330   _aborted                       = 0;
3331   _aborted_overflow              = 0;
3332   _aborted_cm_aborted            = 0;
3333   _aborted_yield                 = 0;
3334   _aborted_timed_out             = 0;
3335   _aborted_satb                  = 0;
3336   _aborted_termination           = 0;
3337   _steal_attempts                = 0;
3338   _steals                        = 0;
3339   _local_pushes                  = 0;
3340   _local_pops                    = 0;
3341   _local_max_size                = 0;
3342   _objs_scanned                  = 0;
3343   _global_pushes                 = 0;
3344   _global_pops                   = 0;
3345   _global_max_size               = 0;
3346   _global_transfers_to           = 0;
3347   _global_transfers_from         = 0;
3348   _regions_claimed               = 0;
3349   _objs_found_on_bitmap          = 0;
3350   _satb_buffers_processed        = 0;
3351 #endif // _MARKING_STATS_
3352 }
3353 
3354 bool CMTask::should_exit_termination() {
3355   regular_clock_call();
3356   // This is called when we are in the termination protocol. We should
3357   // quit if, for some reason, this task wants to abort or the global
3358   // stack is not empty (this means that we can get work from it).
3359   return !_cm->mark_stack_empty() || has_aborted();
3360 }
3361 
3362 void CMTask::reached_limit() {
3363   assert(_words_scanned >= _words_scanned_limit ||
3364          _refs_reached >= _refs_reached_limit ,
3365          "shouldn't have been called otherwise");
3366   regular_clock_call();
3367 }
3368 
3369 void CMTask::regular_clock_call() {
3370   if (has_aborted()) return;
3371 
3372   // First, we need to recalculate the words scanned and refs reached
3373   // limits for the next clock call.
3374   recalculate_limits();
3375 
3376   // During the regular clock call we do the following
3377 
3378   // (1) If an overflow has been flagged, then we abort.
3379   if (_cm->has_overflown()) {
3380     set_has_aborted();
3381     return;
3382   }
3383 
3384   // If we are not concurrent (i.e. we're doing remark) we don't need
3385   // to check anything else. The other steps are only needed during
3386   // the concurrent marking phase.
3387   if (!concurrent()) return;
3388 
3389   // (2) If marking has been aborted for Full GC, then we also abort.
3390   if (_cm->has_aborted()) {
3391     set_has_aborted();
3392     statsOnly( ++_aborted_cm_aborted );
3393     return;
3394   }
3395 
3396   double curr_time_ms = os::elapsedVTime() * 1000.0;
3397 
3398   // (3) If marking stats are enabled, then we update the step history.
3399 #if _MARKING_STATS_
3400   if (_words_scanned >= _words_scanned_limit) {
3401     ++_clock_due_to_scanning;
3402   }
3403   if (_refs_reached >= _refs_reached_limit) {
3404     ++_clock_due_to_marking;
3405   }
3406 
3407   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3408   _interval_start_time_ms = curr_time_ms;
3409   _all_clock_intervals_ms.add(last_interval_ms);
3410 
3411   if (_cm->verbose_medium()) {
3412       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3413                         "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3414                         _worker_id, last_interval_ms,
3415                         _words_scanned,
3416                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3417                         _refs_reached,
3418                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3419   }
3420 #endif // _MARKING_STATS_
3421 
3422   // (4) We check whether we should yield. If we have to, then we abort.
3423   if (SuspendibleThreadSet::should_yield()) {
3424     // We should yield. To do this we abort the task. The caller is
3425     // responsible for yielding.
3426     set_has_aborted();
3427     statsOnly( ++_aborted_yield );
3428     return;
3429   }
3430 
3431   // (5) We check whether we've reached our time quota. If we have,
3432   // then we abort.
3433   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3434   if (elapsed_time_ms > _time_target_ms) {
3435     set_has_aborted();
3436     _has_timed_out = true;
3437     statsOnly( ++_aborted_timed_out );
3438     return;
3439   }
3440 
3441   // (6) Finally, we check whether there are enough completed STAB
3442   // buffers available for processing. If there are, we abort.
3443   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3444   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3445     if (_cm->verbose_low()) {
3446       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3447                              _worker_id);
3448     }
3449     // we do need to process SATB buffers, we'll abort and restart
3450     // the marking task to do so
3451     set_has_aborted();
3452     statsOnly( ++_aborted_satb );
3453     return;
3454   }
3455 }
3456 
3457 void CMTask::recalculate_limits() {
3458   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3459   _words_scanned_limit      = _real_words_scanned_limit;
3460 
3461   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
3462   _refs_reached_limit       = _real_refs_reached_limit;
3463 }
3464 
3465 void CMTask::decrease_limits() {
3466   // This is called when we believe that we're going to do an infrequent
3467   // operation which will increase the per byte scanned cost (i.e. move
3468   // entries to/from the global stack). It basically tries to decrease the
3469   // scanning limit so that the clock is called earlier.
3470 
3471   if (_cm->verbose_medium()) {
3472     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3473   }
3474 
3475   _words_scanned_limit = _real_words_scanned_limit -
3476     3 * words_scanned_period / 4;
3477   _refs_reached_limit  = _real_refs_reached_limit -
3478     3 * refs_reached_period / 4;
3479 }
3480 
3481 void CMTask::move_entries_to_global_stack() {
3482   // local array where we'll store the entries that will be popped
3483   // from the local queue
3484   oop buffer[global_stack_transfer_size];
3485 
3486   int n = 0;
3487   oop obj;
3488   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3489     buffer[n] = obj;
3490     ++n;
3491   }
3492 
3493   if (n > 0) {
3494     // we popped at least one entry from the local queue
3495 
3496     statsOnly( ++_global_transfers_to; _local_pops += n );
3497 
3498     if (!_cm->mark_stack_push(buffer, n)) {
3499       if (_cm->verbose_low()) {
3500         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3501                                _worker_id);
3502       }
3503       set_has_aborted();
3504     } else {
3505       // the transfer was successful
3506 
3507       if (_cm->verbose_medium()) {
3508         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3509                                _worker_id, n);
3510       }
3511       statsOnly( size_t tmp_size = _cm->mark_stack_size();
3512                  if (tmp_size > _global_max_size) {
3513                    _global_max_size = tmp_size;
3514                  }
3515                  _global_pushes += n );
3516     }
3517   }
3518 
3519   // this operation was quite expensive, so decrease the limits
3520   decrease_limits();
3521 }
3522 
3523 void CMTask::get_entries_from_global_stack() {
3524   // local array where we'll store the entries that will be popped
3525   // from the global stack.
3526   oop buffer[global_stack_transfer_size];
3527   int n;
3528   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3529   assert(n <= global_stack_transfer_size,
3530          "we should not pop more than the given limit");
3531   if (n > 0) {
3532     // yes, we did actually pop at least one entry
3533 
3534     statsOnly( ++_global_transfers_from; _global_pops += n );
3535     if (_cm->verbose_medium()) {
3536       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3537                              _worker_id, n);
3538     }
3539     for (int i = 0; i < n; ++i) {
3540       bool success = _task_queue->push(buffer[i]);
3541       // We only call this when the local queue is empty or under a
3542       // given target limit. So, we do not expect this push to fail.
3543       assert(success, "invariant");
3544     }
3545 
3546     statsOnly( size_t tmp_size = (size_t)_task_queue->size();
3547                if (tmp_size > _local_max_size) {
3548                  _local_max_size = tmp_size;
3549                }
3550                _local_pushes += n );
3551   }
3552 
3553   // this operation was quite expensive, so decrease the limits
3554   decrease_limits();
3555 }
3556 
3557 void CMTask::drain_local_queue(bool partially) {
3558   if (has_aborted()) return;
3559 
3560   // Decide what the target size is, depending whether we're going to
3561   // drain it partially (so that other tasks can steal if they run out
3562   // of things to do) or totally (at the very end).
3563   size_t target_size;
3564   if (partially) {
3565     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3566   } else {
3567     target_size = 0;
3568   }
3569 
3570   if (_task_queue->size() > target_size) {
3571     if (_cm->verbose_high()) {
3572       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3573                              _worker_id, target_size);
3574     }
3575 
3576     oop obj;
3577     bool ret = _task_queue->pop_local(obj);
3578     while (ret) {
3579       statsOnly( ++_local_pops );
3580 
3581       if (_cm->verbose_high()) {
3582         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3583                                p2i((void*) obj));
3584       }
3585 
3586       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3587       assert(!_g1h->is_on_master_free_list(
3588                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3589 
3590       scan_object(obj);
3591 
3592       if (_task_queue->size() <= target_size || has_aborted()) {
3593         ret = false;
3594       } else {
3595         ret = _task_queue->pop_local(obj);
3596       }
3597     }
3598 
3599     if (_cm->verbose_high()) {
3600       gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3601                              _worker_id, _task_queue->size());
3602     }
3603   }
3604 }
3605 
3606 void CMTask::drain_global_stack(bool partially) {
3607   if (has_aborted()) return;
3608 
3609   // We have a policy to drain the local queue before we attempt to
3610   // drain the global stack.
3611   assert(partially || _task_queue->size() == 0, "invariant");
3612 
3613   // Decide what the target size is, depending whether we're going to
3614   // drain it partially (so that other tasks can steal if they run out
3615   // of things to do) or totally (at the very end).  Notice that,
3616   // because we move entries from the global stack in chunks or
3617   // because another task might be doing the same, we might in fact
3618   // drop below the target. But, this is not a problem.
3619   size_t target_size;
3620   if (partially) {
3621     target_size = _cm->partial_mark_stack_size_target();
3622   } else {
3623     target_size = 0;
3624   }
3625 
3626   if (_cm->mark_stack_size() > target_size) {
3627     if (_cm->verbose_low()) {
3628       gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3629                              _worker_id, target_size);
3630     }
3631 
3632     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3633       get_entries_from_global_stack();
3634       drain_local_queue(partially);
3635     }
3636 
3637     if (_cm->verbose_low()) {
3638       gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3639                              _worker_id, _cm->mark_stack_size());
3640     }
3641   }
3642 }
3643 
3644 // SATB Queue has several assumptions on whether to call the par or
3645 // non-par versions of the methods. this is why some of the code is
3646 // replicated. We should really get rid of the single-threaded version
3647 // of the code to simplify things.
3648 void CMTask::drain_satb_buffers() {
3649   if (has_aborted()) return;
3650 
3651   // We set this so that the regular clock knows that we're in the
3652   // middle of draining buffers and doesn't set the abort flag when it
3653   // notices that SATB buffers are available for draining. It'd be
3654   // very counter productive if it did that. :-)
3655   _draining_satb_buffers = true;
3656 
3657   CMSATBBufferClosure satb_cl(this, _g1h);
3658   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3659 
3660   // This keeps claiming and applying the closure to completed buffers
3661   // until we run out of buffers or we need to abort.
3662   while (!has_aborted() &&
3663          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
3664     if (_cm->verbose_medium()) {
3665       gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3666     }
3667     statsOnly( ++_satb_buffers_processed );
3668     regular_clock_call();
3669   }
3670 
3671   _draining_satb_buffers = false;
3672 
3673   assert(has_aborted() ||
3674          concurrent() ||
3675          satb_mq_set.completed_buffers_num() == 0, "invariant");
3676 
3677   // again, this was a potentially expensive operation, decrease the
3678   // limits to get the regular clock call early
3679   decrease_limits();
3680 }
3681 
3682 void CMTask::print_stats() {
3683   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3684                          _worker_id, _calls);
3685   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3686                          _elapsed_time_ms, _termination_time_ms);
3687   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3688                          _step_times_ms.num(), _step_times_ms.avg(),
3689                          _step_times_ms.sd());
3690   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3691                          _step_times_ms.maximum(), _step_times_ms.sum());
3692 
3693 #if _MARKING_STATS_
3694   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3695                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3696                          _all_clock_intervals_ms.sd());
3697   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
3698                          _all_clock_intervals_ms.maximum(),
3699                          _all_clock_intervals_ms.sum());
3700   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT,
3701                          _clock_due_to_scanning, _clock_due_to_marking);
3702   gclog_or_tty->print_cr("  Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT,
3703                          _objs_scanned, _objs_found_on_bitmap);
3704   gclog_or_tty->print_cr("  Local Queue:  pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
3705                          _local_pushes, _local_pops, _local_max_size);
3706   gclog_or_tty->print_cr("  Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
3707                          _global_pushes, _global_pops, _global_max_size);
3708   gclog_or_tty->print_cr("                transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT,
3709                          _global_transfers_to,_global_transfers_from);
3710   gclog_or_tty->print_cr("  Regions: claimed = " SIZE_FORMAT, _regions_claimed);
3711   gclog_or_tty->print_cr("  SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed);
3712   gclog_or_tty->print_cr("  Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT,
3713                          _steal_attempts, _steals);
3714   gclog_or_tty->print_cr("  Aborted: " SIZE_FORMAT ", due to", _aborted);
3715   gclog_or_tty->print_cr("    overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT,
3716                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3717   gclog_or_tty->print_cr("    time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT,
3718                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3719 #endif // _MARKING_STATS_
3720 }
3721 
3722 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
3723   return _task_queues->steal(worker_id, hash_seed, obj);
3724 }
3725 
3726 /*****************************************************************************
3727 
3728     The do_marking_step(time_target_ms, ...) method is the building
3729     block of the parallel marking framework. It can be called in parallel
3730     with other invocations of do_marking_step() on different tasks
3731     (but only one per task, obviously) and concurrently with the
3732     mutator threads, or during remark, hence it eliminates the need
3733     for two versions of the code. When called during remark, it will
3734     pick up from where the task left off during the concurrent marking
3735     phase. Interestingly, tasks are also claimable during evacuation
3736     pauses too, since do_marking_step() ensures that it aborts before
3737     it needs to yield.
3738 
3739     The data structures that it uses to do marking work are the
3740     following:
3741 
3742       (1) Marking Bitmap. If there are gray objects that appear only
3743       on the bitmap (this happens either when dealing with an overflow
3744       or when the initial marking phase has simply marked the roots
3745       and didn't push them on the stack), then tasks claim heap
3746       regions whose bitmap they then scan to find gray objects. A
3747       global finger indicates where the end of the last claimed region
3748       is. A local finger indicates how far into the region a task has
3749       scanned. The two fingers are used to determine how to gray an
3750       object (i.e. whether simply marking it is OK, as it will be
3751       visited by a task in the future, or whether it needs to be also
3752       pushed on a stack).
3753 
3754       (2) Local Queue. The local queue of the task which is accessed
3755       reasonably efficiently by the task. Other tasks can steal from
3756       it when they run out of work. Throughout the marking phase, a
3757       task attempts to keep its local queue short but not totally
3758       empty, so that entries are available for stealing by other
3759       tasks. Only when there is no more work, a task will totally
3760       drain its local queue.
3761 
3762       (3) Global Mark Stack. This handles local queue overflow. During
3763       marking only sets of entries are moved between it and the local
3764       queues, as access to it requires a mutex and more fine-grain
3765       interaction with it which might cause contention. If it
3766       overflows, then the marking phase should restart and iterate
3767       over the bitmap to identify gray objects. Throughout the marking
3768       phase, tasks attempt to keep the global mark stack at a small
3769       length but not totally empty, so that entries are available for
3770       popping by other tasks. Only when there is no more work, tasks
3771       will totally drain the global mark stack.
3772 
3773       (4) SATB Buffer Queue. This is where completed SATB buffers are
3774       made available. Buffers are regularly removed from this queue
3775       and scanned for roots, so that the queue doesn't get too
3776       long. During remark, all completed buffers are processed, as
3777       well as the filled in parts of any uncompleted buffers.
3778 
3779     The do_marking_step() method tries to abort when the time target
3780     has been reached. There are a few other cases when the
3781     do_marking_step() method also aborts:
3782 
3783       (1) When the marking phase has been aborted (after a Full GC).
3784 
3785       (2) When a global overflow (on the global stack) has been
3786       triggered. Before the task aborts, it will actually sync up with
3787       the other tasks to ensure that all the marking data structures
3788       (local queues, stacks, fingers etc.)  are re-initialized so that
3789       when do_marking_step() completes, the marking phase can
3790       immediately restart.
3791 
3792       (3) When enough completed SATB buffers are available. The
3793       do_marking_step() method only tries to drain SATB buffers right
3794       at the beginning. So, if enough buffers are available, the
3795       marking step aborts and the SATB buffers are processed at
3796       the beginning of the next invocation.
3797 
3798       (4) To yield. when we have to yield then we abort and yield
3799       right at the end of do_marking_step(). This saves us from a lot
3800       of hassle as, by yielding we might allow a Full GC. If this
3801       happens then objects will be compacted underneath our feet, the
3802       heap might shrink, etc. We save checking for this by just
3803       aborting and doing the yield right at the end.
3804 
3805     From the above it follows that the do_marking_step() method should
3806     be called in a loop (or, otherwise, regularly) until it completes.
3807 
3808     If a marking step completes without its has_aborted() flag being
3809     true, it means it has completed the current marking phase (and
3810     also all other marking tasks have done so and have all synced up).
3811 
3812     A method called regular_clock_call() is invoked "regularly" (in
3813     sub ms intervals) throughout marking. It is this clock method that
3814     checks all the abort conditions which were mentioned above and
3815     decides when the task should abort. A work-based scheme is used to
3816     trigger this clock method: when the number of object words the
3817     marking phase has scanned or the number of references the marking
3818     phase has visited reach a given limit. Additional invocations to
3819     the method clock have been planted in a few other strategic places
3820     too. The initial reason for the clock method was to avoid calling
3821     vtime too regularly, as it is quite expensive. So, once it was in
3822     place, it was natural to piggy-back all the other conditions on it
3823     too and not constantly check them throughout the code.
3824 
3825     If do_termination is true then do_marking_step will enter its
3826     termination protocol.
3827 
3828     The value of is_serial must be true when do_marking_step is being
3829     called serially (i.e. by the VMThread) and do_marking_step should
3830     skip any synchronization in the termination and overflow code.
3831     Examples include the serial remark code and the serial reference
3832     processing closures.
3833 
3834     The value of is_serial must be false when do_marking_step is
3835     being called by any of the worker threads in a work gang.
3836     Examples include the concurrent marking code (CMMarkingTask),
3837     the MT remark code, and the MT reference processing closures.
3838 
3839  *****************************************************************************/
3840 
3841 void CMTask::do_marking_step(double time_target_ms,
3842                              bool do_termination,
3843                              bool is_serial) {
3844   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
3845   assert(concurrent() == _cm->concurrent(), "they should be the same");
3846 
3847   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
3848   assert(_task_queues != NULL, "invariant");
3849   assert(_task_queue != NULL, "invariant");
3850   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
3851 
3852   assert(!_claimed,
3853          "only one thread should claim this task at any one time");
3854 
3855   // OK, this doesn't safeguard again all possible scenarios, as it is
3856   // possible for two threads to set the _claimed flag at the same
3857   // time. But it is only for debugging purposes anyway and it will
3858   // catch most problems.
3859   _claimed = true;
3860 
3861   _start_time_ms = os::elapsedVTime() * 1000.0;
3862   statsOnly( _interval_start_time_ms = _start_time_ms );
3863 
3864   // If do_stealing is true then do_marking_step will attempt to
3865   // steal work from the other CMTasks. It only makes sense to
3866   // enable stealing when the termination protocol is enabled
3867   // and do_marking_step() is not being called serially.
3868   bool do_stealing = do_termination && !is_serial;
3869 
3870   double diff_prediction_ms =
3871     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
3872   _time_target_ms = time_target_ms - diff_prediction_ms;
3873 
3874   // set up the variables that are used in the work-based scheme to
3875   // call the regular clock method
3876   _words_scanned = 0;
3877   _refs_reached  = 0;
3878   recalculate_limits();
3879 
3880   // clear all flags
3881   clear_has_aborted();
3882   _has_timed_out = false;
3883   _draining_satb_buffers = false;
3884 
3885   ++_calls;
3886 
3887   if (_cm->verbose_low()) {
3888     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
3889                            "target = %1.2lfms >>>>>>>>>>",
3890                            _worker_id, _calls, _time_target_ms);
3891   }
3892 
3893   // Set up the bitmap and oop closures. Anything that uses them is
3894   // eventually called from this method, so it is OK to allocate these
3895   // statically.
3896   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
3897   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
3898   set_cm_oop_closure(&cm_oop_closure);
3899 
3900   if (_cm->has_overflown()) {
3901     // This can happen if the mark stack overflows during a GC pause
3902     // and this task, after a yield point, restarts. We have to abort
3903     // as we need to get into the overflow protocol which happens
3904     // right at the end of this task.
3905     set_has_aborted();
3906   }
3907 
3908   // First drain any available SATB buffers. After this, we will not
3909   // look at SATB buffers before the next invocation of this method.
3910   // If enough completed SATB buffers are queued up, the regular clock
3911   // will abort this task so that it restarts.
3912   drain_satb_buffers();
3913   // ...then partially drain the local queue and the global stack
3914   drain_local_queue(true);
3915   drain_global_stack(true);
3916 
3917   do {
3918     if (!has_aborted() && _curr_region != NULL) {
3919       // This means that we're already holding on to a region.
3920       assert(_finger != NULL, "if region is not NULL, then the finger "
3921              "should not be NULL either");
3922 
3923       // We might have restarted this task after an evacuation pause
3924       // which might have evacuated the region we're holding on to
3925       // underneath our feet. Let's read its limit again to make sure
3926       // that we do not iterate over a region of the heap that
3927       // contains garbage (update_region_limit() will also move
3928       // _finger to the start of the region if it is found empty).
3929       update_region_limit();
3930       // We will start from _finger not from the start of the region,
3931       // as we might be restarting this task after aborting half-way
3932       // through scanning this region. In this case, _finger points to
3933       // the address where we last found a marked object. If this is a
3934       // fresh region, _finger points to start().
3935       MemRegion mr = MemRegion(_finger, _region_limit);
3936 
3937       if (_cm->verbose_low()) {
3938         gclog_or_tty->print_cr("[%u] we're scanning part "
3939                                "["PTR_FORMAT", "PTR_FORMAT") "
3940                                "of region "HR_FORMAT,
3941                                _worker_id, p2i(_finger), p2i(_region_limit),
3942                                HR_FORMAT_PARAMS(_curr_region));
3943       }
3944 
3945       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
3946              "humongous regions should go around loop once only");
3947 
3948       // Some special cases:
3949       // If the memory region is empty, we can just give up the region.
3950       // If the current region is humongous then we only need to check
3951       // the bitmap for the bit associated with the start of the object,
3952       // scan the object if it's live, and give up the region.
3953       // Otherwise, let's iterate over the bitmap of the part of the region
3954       // that is left.
3955       // If the iteration is successful, give up the region.
3956       if (mr.is_empty()) {
3957         giveup_current_region();
3958         regular_clock_call();
3959       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
3960         if (_nextMarkBitMap->isMarked(mr.start())) {
3961           // The object is marked - apply the closure
3962           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
3963           bitmap_closure.do_bit(offset);
3964         }
3965         // Even if this task aborted while scanning the humongous object
3966         // we can (and should) give up the current region.
3967         giveup_current_region();
3968         regular_clock_call();
3969       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
3970         giveup_current_region();
3971         regular_clock_call();
3972       } else {
3973         assert(has_aborted(), "currently the only way to do so");
3974         // The only way to abort the bitmap iteration is to return
3975         // false from the do_bit() method. However, inside the
3976         // do_bit() method we move the _finger to point to the
3977         // object currently being looked at. So, if we bail out, we
3978         // have definitely set _finger to something non-null.
3979         assert(_finger != NULL, "invariant");
3980 
3981         // Region iteration was actually aborted. So now _finger
3982         // points to the address of the object we last scanned. If we
3983         // leave it there, when we restart this task, we will rescan
3984         // the object. It is easy to avoid this. We move the finger by
3985         // enough to point to the next possible object header (the
3986         // bitmap knows by how much we need to move it as it knows its
3987         // granularity).
3988         assert(_finger < _region_limit, "invariant");
3989         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
3990         // Check if bitmap iteration was aborted while scanning the last object
3991         if (new_finger >= _region_limit) {
3992           giveup_current_region();
3993         } else {
3994           move_finger_to(new_finger);
3995         }
3996       }
3997     }
3998     // At this point we have either completed iterating over the
3999     // region we were holding on to, or we have aborted.
4000 
4001     // We then partially drain the local queue and the global stack.
4002     // (Do we really need this?)
4003     drain_local_queue(true);
4004     drain_global_stack(true);
4005 
4006     // Read the note on the claim_region() method on why it might
4007     // return NULL with potentially more regions available for
4008     // claiming and why we have to check out_of_regions() to determine
4009     // whether we're done or not.
4010     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4011       // We are going to try to claim a new region. We should have
4012       // given up on the previous one.
4013       // Separated the asserts so that we know which one fires.
4014       assert(_curr_region  == NULL, "invariant");
4015       assert(_finger       == NULL, "invariant");
4016       assert(_region_limit == NULL, "invariant");
4017       if (_cm->verbose_low()) {
4018         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4019       }
4020       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4021       if (claimed_region != NULL) {
4022         // Yes, we managed to claim one
4023         statsOnly( ++_regions_claimed );
4024 
4025         if (_cm->verbose_low()) {
4026           gclog_or_tty->print_cr("[%u] we successfully claimed "
4027                                  "region "PTR_FORMAT,
4028                                  _worker_id, p2i(claimed_region));
4029         }
4030 
4031         setup_for_region(claimed_region);
4032         assert(_curr_region == claimed_region, "invariant");
4033       }
4034       // It is important to call the regular clock here. It might take
4035       // a while to claim a region if, for example, we hit a large
4036       // block of empty regions. So we need to call the regular clock
4037       // method once round the loop to make sure it's called
4038       // frequently enough.
4039       regular_clock_call();
4040     }
4041 
4042     if (!has_aborted() && _curr_region == NULL) {
4043       assert(_cm->out_of_regions(),
4044              "at this point we should be out of regions");
4045     }
4046   } while ( _curr_region != NULL && !has_aborted());
4047 
4048   if (!has_aborted()) {
4049     // We cannot check whether the global stack is empty, since other
4050     // tasks might be pushing objects to it concurrently.
4051     assert(_cm->out_of_regions(),
4052            "at this point we should be out of regions");
4053 
4054     if (_cm->verbose_low()) {
4055       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4056     }
4057 
4058     // Try to reduce the number of available SATB buffers so that
4059     // remark has less work to do.
4060     drain_satb_buffers();
4061   }
4062 
4063   // Since we've done everything else, we can now totally drain the
4064   // local queue and global stack.
4065   drain_local_queue(false);
4066   drain_global_stack(false);
4067 
4068   // Attempt at work stealing from other task's queues.
4069   if (do_stealing && !has_aborted()) {
4070     // We have not aborted. This means that we have finished all that
4071     // we could. Let's try to do some stealing...
4072 
4073     // We cannot check whether the global stack is empty, since other
4074     // tasks might be pushing objects to it concurrently.
4075     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4076            "only way to reach here");
4077 
4078     if (_cm->verbose_low()) {
4079       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4080     }
4081 
4082     while (!has_aborted()) {
4083       oop obj;
4084       statsOnly( ++_steal_attempts );
4085 
4086       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4087         if (_cm->verbose_medium()) {
4088           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4089                                  _worker_id, p2i((void*) obj));
4090         }
4091 
4092         statsOnly( ++_steals );
4093 
4094         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4095                "any stolen object should be marked");
4096         scan_object(obj);
4097 
4098         // And since we're towards the end, let's totally drain the
4099         // local queue and global stack.
4100         drain_local_queue(false);
4101         drain_global_stack(false);
4102       } else {
4103         break;
4104       }
4105     }
4106   }
4107 
4108   // If we are about to wrap up and go into termination, check if we
4109   // should raise the overflow flag.
4110   if (do_termination && !has_aborted()) {
4111     if (_cm->force_overflow()->should_force()) {
4112       _cm->set_has_overflown();
4113       regular_clock_call();
4114     }
4115   }
4116 
4117   // We still haven't aborted. Now, let's try to get into the
4118   // termination protocol.
4119   if (do_termination && !has_aborted()) {
4120     // We cannot check whether the global stack is empty, since other
4121     // tasks might be concurrently pushing objects on it.
4122     // Separated the asserts so that we know which one fires.
4123     assert(_cm->out_of_regions(), "only way to reach here");
4124     assert(_task_queue->size() == 0, "only way to reach here");
4125 
4126     if (_cm->verbose_low()) {
4127       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4128     }
4129 
4130     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4131 
4132     // The CMTask class also extends the TerminatorTerminator class,
4133     // hence its should_exit_termination() method will also decide
4134     // whether to exit the termination protocol or not.
4135     bool finished = (is_serial ||
4136                      _cm->terminator()->offer_termination(this));
4137     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4138     _termination_time_ms +=
4139       termination_end_time_ms - _termination_start_time_ms;
4140 
4141     if (finished) {
4142       // We're all done.
4143 
4144       if (_worker_id == 0) {
4145         // let's allow task 0 to do this
4146         if (concurrent()) {
4147           assert(_cm->concurrent_marking_in_progress(), "invariant");
4148           // we need to set this to false before the next
4149           // safepoint. This way we ensure that the marking phase
4150           // doesn't observe any more heap expansions.
4151           _cm->clear_concurrent_marking_in_progress();
4152         }
4153       }
4154 
4155       // We can now guarantee that the global stack is empty, since
4156       // all other tasks have finished. We separated the guarantees so
4157       // that, if a condition is false, we can immediately find out
4158       // which one.
4159       guarantee(_cm->out_of_regions(), "only way to reach here");
4160       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4161       guarantee(_task_queue->size() == 0, "only way to reach here");
4162       guarantee(!_cm->has_overflown(), "only way to reach here");
4163       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4164 
4165       if (_cm->verbose_low()) {
4166         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4167       }
4168     } else {
4169       // Apparently there's more work to do. Let's abort this task. It
4170       // will restart it and we can hopefully find more things to do.
4171 
4172       if (_cm->verbose_low()) {
4173         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4174                                _worker_id);
4175       }
4176 
4177       set_has_aborted();
4178       statsOnly( ++_aborted_termination );
4179     }
4180   }
4181 
4182   // Mainly for debugging purposes to make sure that a pointer to the
4183   // closure which was statically allocated in this frame doesn't
4184   // escape it by accident.
4185   set_cm_oop_closure(NULL);
4186   double end_time_ms = os::elapsedVTime() * 1000.0;
4187   double elapsed_time_ms = end_time_ms - _start_time_ms;
4188   // Update the step history.
4189   _step_times_ms.add(elapsed_time_ms);
4190 
4191   if (has_aborted()) {
4192     // The task was aborted for some reason.
4193 
4194     statsOnly( ++_aborted );
4195 
4196     if (_has_timed_out) {
4197       double diff_ms = elapsed_time_ms - _time_target_ms;
4198       // Keep statistics of how well we did with respect to hitting
4199       // our target only if we actually timed out (if we aborted for
4200       // other reasons, then the results might get skewed).
4201       _marking_step_diffs_ms.add(diff_ms);
4202     }
4203 
4204     if (_cm->has_overflown()) {
4205       // This is the interesting one. We aborted because a global
4206       // overflow was raised. This means we have to restart the
4207       // marking phase and start iterating over regions. However, in
4208       // order to do this we have to make sure that all tasks stop
4209       // what they are doing and re-initialize in a safe manner. We
4210       // will achieve this with the use of two barrier sync points.
4211 
4212       if (_cm->verbose_low()) {
4213         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4214       }
4215 
4216       if (!is_serial) {
4217         // We only need to enter the sync barrier if being called
4218         // from a parallel context
4219         _cm->enter_first_sync_barrier(_worker_id);
4220 
4221         // When we exit this sync barrier we know that all tasks have
4222         // stopped doing marking work. So, it's now safe to
4223         // re-initialize our data structures. At the end of this method,
4224         // task 0 will clear the global data structures.
4225       }
4226 
4227       statsOnly( ++_aborted_overflow );
4228 
4229       // We clear the local state of this task...
4230       clear_region_fields();
4231 
4232       if (!is_serial) {
4233         // ...and enter the second barrier.
4234         _cm->enter_second_sync_barrier(_worker_id);
4235       }
4236       // At this point, if we're during the concurrent phase of
4237       // marking, everything has been re-initialized and we're
4238       // ready to restart.
4239     }
4240 
4241     if (_cm->verbose_low()) {
4242       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4243                              "elapsed = %1.2lfms <<<<<<<<<<",
4244                              _worker_id, _time_target_ms, elapsed_time_ms);
4245       if (_cm->has_aborted()) {
4246         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4247                                _worker_id);
4248       }
4249     }
4250   } else {
4251     if (_cm->verbose_low()) {
4252       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4253                              "elapsed = %1.2lfms <<<<<<<<<<",
4254                              _worker_id, _time_target_ms, elapsed_time_ms);
4255     }
4256   }
4257 
4258   _claimed = false;
4259 }
4260 
4261 CMTask::CMTask(uint worker_id,
4262                ConcurrentMark* cm,
4263                size_t* marked_bytes,
4264                BitMap* card_bm,
4265                CMTaskQueue* task_queue,
4266                CMTaskQueueSet* task_queues)
4267   : _g1h(G1CollectedHeap::heap()),
4268     _worker_id(worker_id), _cm(cm),
4269     _claimed(false),
4270     _nextMarkBitMap(NULL), _hash_seed(17),
4271     _task_queue(task_queue),
4272     _task_queues(task_queues),
4273     _cm_oop_closure(NULL),
4274     _marked_bytes_array(marked_bytes),
4275     _card_bm(card_bm) {
4276   guarantee(task_queue != NULL, "invariant");
4277   guarantee(task_queues != NULL, "invariant");
4278 
4279   statsOnly( _clock_due_to_scanning = 0;
4280              _clock_due_to_marking  = 0 );
4281 
4282   _marking_step_diffs_ms.add(0.5);
4283 }
4284 
4285 // These are formatting macros that are used below to ensure
4286 // consistent formatting. The *_H_* versions are used to format the
4287 // header for a particular value and they should be kept consistent
4288 // with the corresponding macro. Also note that most of the macros add
4289 // the necessary white space (as a prefix) which makes them a bit
4290 // easier to compose.
4291 
4292 // All the output lines are prefixed with this string to be able to
4293 // identify them easily in a large log file.
4294 #define G1PPRL_LINE_PREFIX            "###"
4295 
4296 #define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
4297 #ifdef _LP64
4298 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
4299 #else // _LP64
4300 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
4301 #endif // _LP64
4302 
4303 // For per-region info
4304 #define G1PPRL_TYPE_FORMAT            "   %-4s"
4305 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
4306 #define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
4307 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
4308 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
4309 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
4310 
4311 // For summary info
4312 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
4313 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
4314 #define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
4315 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
4316 
4317 G1PrintRegionLivenessInfoClosure::
4318 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4319   : _out(out),
4320     _total_used_bytes(0), _total_capacity_bytes(0),
4321     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4322     _hum_used_bytes(0), _hum_capacity_bytes(0),
4323     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4324     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4325   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4326   MemRegion g1_reserved = g1h->g1_reserved();
4327   double now = os::elapsedTime();
4328 
4329   // Print the header of the output.
4330   _out->cr();
4331   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4332   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4333                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4334                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4335                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4336                  HeapRegion::GrainBytes);
4337   _out->print_cr(G1PPRL_LINE_PREFIX);
4338   _out->print_cr(G1PPRL_LINE_PREFIX
4339                 G1PPRL_TYPE_H_FORMAT
4340                 G1PPRL_ADDR_BASE_H_FORMAT
4341                 G1PPRL_BYTE_H_FORMAT
4342                 G1PPRL_BYTE_H_FORMAT
4343                 G1PPRL_BYTE_H_FORMAT
4344                 G1PPRL_DOUBLE_H_FORMAT
4345                 G1PPRL_BYTE_H_FORMAT
4346                 G1PPRL_BYTE_H_FORMAT,
4347                 "type", "address-range",
4348                 "used", "prev-live", "next-live", "gc-eff",
4349                 "remset", "code-roots");
4350   _out->print_cr(G1PPRL_LINE_PREFIX
4351                 G1PPRL_TYPE_H_FORMAT
4352                 G1PPRL_ADDR_BASE_H_FORMAT
4353                 G1PPRL_BYTE_H_FORMAT
4354                 G1PPRL_BYTE_H_FORMAT
4355                 G1PPRL_BYTE_H_FORMAT
4356                 G1PPRL_DOUBLE_H_FORMAT
4357                 G1PPRL_BYTE_H_FORMAT
4358                 G1PPRL_BYTE_H_FORMAT,
4359                 "", "",
4360                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4361                 "(bytes)", "(bytes)");
4362 }
4363 
4364 // It takes as a parameter a reference to one of the _hum_* fields, it
4365 // deduces the corresponding value for a region in a humongous region
4366 // series (either the region size, or what's left if the _hum_* field
4367 // is < the region size), and updates the _hum_* field accordingly.
4368 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4369   size_t bytes = 0;
4370   // The > 0 check is to deal with the prev and next live bytes which
4371   // could be 0.
4372   if (*hum_bytes > 0) {
4373     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4374     *hum_bytes -= bytes;
4375   }
4376   return bytes;
4377 }
4378 
4379 // It deduces the values for a region in a humongous region series
4380 // from the _hum_* fields and updates those accordingly. It assumes
4381 // that that _hum_* fields have already been set up from the "starts
4382 // humongous" region and we visit the regions in address order.
4383 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4384                                                      size_t* capacity_bytes,
4385                                                      size_t* prev_live_bytes,
4386                                                      size_t* next_live_bytes) {
4387   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4388   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
4389   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
4390   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4391   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4392 }
4393 
4394 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4395   const char* type       = r->get_type_str();
4396   HeapWord* bottom       = r->bottom();
4397   HeapWord* end          = r->end();
4398   size_t capacity_bytes  = r->capacity();
4399   size_t used_bytes      = r->used();
4400   size_t prev_live_bytes = r->live_bytes();
4401   size_t next_live_bytes = r->next_live_bytes();
4402   double gc_eff          = r->gc_efficiency();
4403   size_t remset_bytes    = r->rem_set()->mem_size();
4404   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4405 
4406   if (r->is_starts_humongous()) {
4407     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4408            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4409            "they should have been zeroed after the last time we used them");
4410     // Set up the _hum_* fields.
4411     _hum_capacity_bytes  = capacity_bytes;
4412     _hum_used_bytes      = used_bytes;
4413     _hum_prev_live_bytes = prev_live_bytes;
4414     _hum_next_live_bytes = next_live_bytes;
4415     get_hum_bytes(&used_bytes, &capacity_bytes,
4416                   &prev_live_bytes, &next_live_bytes);
4417     end = bottom + HeapRegion::GrainWords;
4418   } else if (r->is_continues_humongous()) {
4419     get_hum_bytes(&used_bytes, &capacity_bytes,
4420                   &prev_live_bytes, &next_live_bytes);
4421     assert(end == bottom + HeapRegion::GrainWords, "invariant");
4422   }
4423 
4424   _total_used_bytes      += used_bytes;
4425   _total_capacity_bytes  += capacity_bytes;
4426   _total_prev_live_bytes += prev_live_bytes;
4427   _total_next_live_bytes += next_live_bytes;
4428   _total_remset_bytes    += remset_bytes;
4429   _total_strong_code_roots_bytes += strong_code_roots_bytes;
4430 
4431   // Print a line for this particular region.
4432   _out->print_cr(G1PPRL_LINE_PREFIX
4433                  G1PPRL_TYPE_FORMAT
4434                  G1PPRL_ADDR_BASE_FORMAT
4435                  G1PPRL_BYTE_FORMAT
4436                  G1PPRL_BYTE_FORMAT
4437                  G1PPRL_BYTE_FORMAT
4438                  G1PPRL_DOUBLE_FORMAT
4439                  G1PPRL_BYTE_FORMAT
4440                  G1PPRL_BYTE_FORMAT,
4441                  type, p2i(bottom), p2i(end),
4442                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4443                  remset_bytes, strong_code_roots_bytes);
4444 
4445   return false;
4446 }
4447 
4448 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4449   // add static memory usages to remembered set sizes
4450   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4451   // Print the footer of the output.
4452   _out->print_cr(G1PPRL_LINE_PREFIX);
4453   _out->print_cr(G1PPRL_LINE_PREFIX
4454                  " SUMMARY"
4455                  G1PPRL_SUM_MB_FORMAT("capacity")
4456                  G1PPRL_SUM_MB_PERC_FORMAT("used")
4457                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4458                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4459                  G1PPRL_SUM_MB_FORMAT("remset")
4460                  G1PPRL_SUM_MB_FORMAT("code-roots"),
4461                  bytes_to_mb(_total_capacity_bytes),
4462                  bytes_to_mb(_total_used_bytes),
4463                  perc(_total_used_bytes, _total_capacity_bytes),
4464                  bytes_to_mb(_total_prev_live_bytes),
4465                  perc(_total_prev_live_bytes, _total_capacity_bytes),
4466                  bytes_to_mb(_total_next_live_bytes),
4467                  perc(_total_next_live_bytes, _total_capacity_bytes),
4468                  bytes_to_mb(_total_remset_bytes),
4469                  bytes_to_mb(_total_strong_code_roots_bytes));
4470   _out->cr();
4471 }