/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/strongRootsScope.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/taskqueue.inline.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}
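
// Illustrative sketch of a typical way to walk the marked objects in
// [bottom, top) with the method above; "bm", "bottom" and "top" are
// placeholder names, not fields of this class. The same pattern
// appears in CalcLiveObjectsClosure later in this file.
//
//   HeapWord* addr = bm->getNextMarkedWordAddress(bottom, top);
//   while (addr < top) {
//     oop obj = oop(addr);
//     // ... process obj ...
//     addr = bm->getNextMarkedWordAddress(addr + obj->size(), top);
//   }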

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}
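
// Worked example of the sizing above (illustrative numbers): with
// MinObjAlignmentInBytes == 8 and BitsPerByte == 8, mark_distance()
// is 64, so compute_size() reserves heap_size / 64 bytes of bitmap:
// one bit for every 8-byte-aligned word that could start an object,
// i.e. one bitmap byte per 64 bytes of heap.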

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If it yields and marking has been aborted, the iteration is aborted.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};
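
// Design note on the closure above: clearing is done in 1M chunks
// rather than a whole region at a time so that, when _may_yield is
// set, the distance between two do_yield_check() calls stays small
// and a concurrent worker can react promptly to a safepoint request
// or an aborted marking cycle.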

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer     _hrclaimer;
  bool                  _suspendible; // If the task is suspendible, workers must join the STS.

public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    if (_suspendible) {
      SuspendibleThreadSet::join();
    }
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
    if (_suspendible) {
      SuspendibleThreadSet::leave();
    }
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  // Note: intersection() returns the clipped region; it does not modify mr in place.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  // As in markRange(), assign the result: intersection() does not modify mr in place.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflowed the marking stack while marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically.  We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}
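
// The loop above is a claim-then-publish pattern: read the current
// index, try to bump it with a CAS (Atomic::cmpxchg takes
// (exchange_value, dest, compare_value) and returns the old value),
// and only the winner stores its oop into the claimed slot. A minimal
// sketch of the same idea, with "_top" and "_slots" as hypothetical
// names:
//
//   jint top = _top;                                   // read
//   if (Atomic::cmpxchg(top + 1, &_top, top) == top) { // claim
//     _slots[top] = value;                             // publish
//   } else {
//     // lost the race; re-read and retry
//   }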

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int  ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}
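
// Contrast with par_adjoin_arr() above: par_push_arr() serializes on
// ParGCRareEvent_lock instead of looping on a CAS. The lock's name
// suggests these bulk pushes happen on rare paths, where the
// simplicity of a mutex is preferred over lock-free retries.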

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint  new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}
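
// Note on drain(): the closure type is a template parameter rather
// than a virtual base, which gives the compiler the chance to inline
// the per-oop closure calls made from oop_iterate() on this hot path.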

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue, so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false),  _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}
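
// claim_next() above uses a double-checked pattern: _next_survivor is
// first read without the lock (a cheap rejection once the survivor
// list is exhausted), and only an apparently non-NULL value is
// re-read and advanced under RootRegionScan_lock, so the common
// "nothing left to claim" case never touches the lock.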

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}
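
// The wait loop above pairs with the notify_all() in scan_finished():
// both hold RootRegionScan_lock, and the waiter re-tests
// scan_in_progress() in a loop, the standard guard against spurious
// wakeups.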

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}
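
// Worked example (illustrative): with n_par_threads == 8 this yields
// (8 + 2) / 4 == 2 marking threads; with 4 it yields 1. Adding 2
// before dividing rounds n/4 to the nearest integer rather than
// always truncating, and the MAX2 guarantees at least one thread.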

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
      vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
                                              (double) os::processor_count();
    double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor             = sleep_factor;
    _marking_task_overhead    = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  }
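
  // Worked example for the G1MarkingOverheadPercent branch above
  // (illustrative values, not defaults): with G1MarkingOverheadPercent
  // == 10, MaxGCPauseMillis == 200, GCPauseIntervalMillis == 1000 and
  // 8 processors, overall_cm_overhead = 200 * 0.10 / 1000 = 0.02, i.e.
  // marking may use 2% of total CPU. Then marking_thread_num =
  // ceil(0.02 / (1.0 / 8)) = 1, marking_task_overhead =
  // 0.02 / 1 * 8 = 0.16, and sleep_factor = (1 - 0.16) / 0.16 = 5.25:
  // the single marking thread sleeps 5.25x as long as it just worked,
  // keeping its CPU usage near the 16% per-thread target.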

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = (uint) ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
                   (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new FlexibleWorkGang("G1 Marker",
       _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended by a Full GC or for an
 * evacuation pause to occur. This is actually safe, since entering
 * the sync barrier is one of the last things do_marking_step() does,
 * and it doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // After this, each task should reset its own data structures and
  // then go into the second barrier.
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial */);

        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        _cm->clear_has_overflown();

        _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};
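
// Worked example of the throttling in work() above (illustrative
// numbers): if a marking step consumed 10ms of vtime and sleep_factor()
// is 5.25 (see the overhead example in the ConcurrentMark constructor),
// the worker sleeps for (jlong)(0.010 * 5.25 * 1000.0) = 52ms before
// its next step, and it leaves the STS first so the sleep cannot delay
// a safepoint.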

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}
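
// Note on scanRootRegion() above: the assert checks that NTAMS is at
// bottom(), i.e. everything in a root region was allocated since
// marking started and is treated as implicitly live, which is why the
// scan can walk objects linearly by size without consulting the mark
// bitmap. Prefetch::read() fetches PrefetchScanIntervalInBytes ahead
// of the cursor to hide cache-miss latency on the linear walk.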

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  // Don't set _n_par_threads because it affects MT in process_roots(),
  // and the decisions on that MT processing are made elsewhere.
  assert(_parallel_workers->active_workers() > 0, "Should have been set");
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops.  We overflowed.  Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
1418   // to 1 the bits on the region bitmap that correspond to its
1419   // associated "continues humongous" regions.
1420   void set_bit_for_region(HeapRegion* hr) {
1421     assert(!hr->is_continues_humongous(), "should have filtered those out");
1422 
1423     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1424     if (!hr->is_starts_humongous()) {
1425       // Normal (non-humongous) case: just set the bit.
1426       _region_bm->par_at_put(index, true);
1427     } else {
1428       // Starts humongous case: calculate how many regions are part of
1429       // this humongous region and then set the bit range.
1430       BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
1431       _region_bm->par_at_put_range(index, end_index, true);
1432     }
1433   }
1434 
1435 public:
1436   CMCountDataClosureBase(G1CollectedHeap* g1h,
1437                          BitMap* region_bm, BitMap* card_bm):
1438     _g1h(g1h), _cm(g1h->concurrent_mark()),
1439     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
1440     _region_bm(region_bm), _card_bm(card_bm) { }
1441 };
1442 
1443 // Closure that calculates the amount of live data, in bytes, per
1444 // region. Used for verification purposes during the cleanup pause.
1445 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1446   CMBitMapRO* _bm;
1447   size_t _region_marked_bytes;
1448 
1449 public:
1450   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1451                          BitMap* region_bm, BitMap* card_bm) :
1452     CMCountDataClosureBase(g1h, region_bm, card_bm),
1453     _bm(bm), _region_marked_bytes(0) { }
1454 
1455   bool doHeapRegion(HeapRegion* hr) {
1456 
1457     if (hr->is_continues_humongous()) {
1458       // We will ignore these here and process them when their
1459       // associated "starts humongous" region is processed (see
1460       // set_bit_for_region()). Note that we cannot rely on their
1461       // associated "starts humongous" region to have its bit set to
1462       // 1 since, due to the region chunking in the parallel region
1463       // iteration, a "continues humongous" region might be visited
1464       // before its associated "starts humongous".
1465       return false;
1466     }
1467 
1468     HeapWord* ntams = hr->next_top_at_mark_start();
1469     HeapWord* start = hr->bottom();
1470 
1471     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1472            err_msg("Preconditions not met - "
1473                    "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1474                    p2i(start), p2i(ntams), p2i(hr->end())));
1475 
1476     // Find the first marked object at or after "start".
1477     start = _bm->getNextMarkedWordAddress(start, ntams);
1478 
1479     size_t marked_bytes = 0;
1480 
1481     while (start < ntams) {
1482       oop obj = oop(start);
1483       int obj_sz = obj->size();
1484       HeapWord* obj_end = start + obj_sz;
1485 
1486       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1487       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1488 
1489       // Note: if we're looking at the last region in the heap, obj_end
1490       // could actually be just beyond the end of the heap; end_idx
1491       // will then correspond to a (non-existent) card that is also
1492       // just beyond the heap.
1493       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1494         // end of object is not card aligned - increment to cover
1495         // all the cards spanned by the object
1496         end_idx += 1;
1497       }
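           // Worked example (illustrative only, assuming the usual
           // 512-byte cards, i.e. 64 heap words per card on a 64-bit
           // VM): an object starting 10 words into card 7 and ending
           // 5 words into card 9 yields start_idx == 7 and end_idx == 9;
           // since obj_end is not card aligned, end_idx is bumped to 10
           // so that the half-open range [7, 10) covers every card the
           // object touches.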
1498 
1499       // Set the bits in the card BM for the cards spanned by this object.
1500       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1501 
1502       // Add the size of this object to the number of marked bytes.
1503       marked_bytes += (size_t)obj_sz * HeapWordSize;
1504 
1505       // Find the next marked object after this one.
1506       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1507     }
1508 
1509     // Mark the allocated-since-marking portion...
1510     HeapWord* top = hr->top();
1511     if (ntams < top) {
1512       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1513       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1514 
1515       // Note: if we're looking at the last region in the heap, top
1516       // could actually be just beyond the end of the heap; end_idx
1517       // will then correspond to a (non-existent) card that is also
1518       // just beyond the heap.
1519       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1520         // top is not card aligned - increment end_idx to cover
1521         // all the cards spanned by the [ntams, top) range
1522         end_idx += 1;
1523       }
1524       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1525 
1526       // This definitely means the region has live objects.
1527       set_bit_for_region(hr);
1528     }
1529 
1530     // Update the live region bitmap.
1531     if (marked_bytes > 0) {
1532       set_bit_for_region(hr);
1533     }
1534 
1535     // Set the marked bytes for the current region so that
1536     // it can be queried by a calling verification routine
1537     _region_marked_bytes = marked_bytes;
1538 
1539     return false;
1540   }
1541 
1542   size_t region_marked_bytes() const { return _region_marked_bytes; }
1543 };
1544 
1545 // Heap region closure used for verifying the counting data
1546 // that was accumulated concurrently and aggregated during
1547 // the remark pause. This closure is applied to the heap
1548 // regions during the STW cleanup pause.
1549 
1550 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1551   G1CollectedHeap* _g1h;
1552   ConcurrentMark* _cm;
1553   CalcLiveObjectsClosure _calc_cl;
1554   BitMap* _region_bm;   // Region BM to be verified
1555   BitMap* _card_bm;     // Card BM to be verified
1556   bool _verbose;        // verbose output?
1557 
1558   BitMap* _exp_region_bm; // Expected Region BM values
1559   BitMap* _exp_card_bm;   // Expected card BM values
1560 
1561   int _failures;
1562 
1563 public:
1564   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1565                                 BitMap* region_bm,
1566                                 BitMap* card_bm,
1567                                 BitMap* exp_region_bm,
1568                                 BitMap* exp_card_bm,
1569                                 bool verbose) :
1570     _g1h(g1h), _cm(g1h->concurrent_mark()),
1571     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1572     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1573     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1574     _failures(0) { }
1575 
1576   int failures() const { return _failures; }
1577 
1578   bool doHeapRegion(HeapRegion* hr) {
1579     if (hr->is_continues_humongous()) {
1580       // We will ignore these here and process them when their
1581       // associated "starts humongous" region is processed (see
1582       // set_bit_for_region()). Note that we cannot rely on their
1583       // associated "starts humongous" region to have its bit set to
1584       // 1 since, due to the region chunking in the parallel region
1585       // iteration, a "continues humongous" region might be visited
1586       // before its associated "starts humongous".
1587       return false;
1588     }
1589 
1590     int failures = 0;
1591 
1592     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1593     // this region and set the corresponding bits in the expected region
1594     // and card bitmaps.
1595     bool res = _calc_cl.doHeapRegion(hr);
1596     assert(res == false, "should be continuing");
1597 
1598     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1599                     Mutex::_no_safepoint_check_flag);
1600 
1601     // Verify the marked bytes for this region.
1602     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1603     size_t act_marked_bytes = hr->next_marked_bytes();
1604 
1605     // We're not OK if expected marked bytes > actual marked bytes. It means
1606     // we have missed accounting for some objects during the actual marking.
1607     if (exp_marked_bytes > act_marked_bytes) {
1608       if (_verbose) {
1609         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1610                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1611                                hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
1612       }
1613       failures += 1;
1614     }
1615 
1616     // Verify this region's bit in both the actual region bitmap and
1617     // the expected region bitmap (which was just calculated).
1618     // We're not OK if the bit in the calculated expected region
1619     // bitmap is set and the bit in the actual region bitmap is not.
1620     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1621 
1622     bool expected = _exp_region_bm->at(index);
1623     bool actual = _region_bm->at(index);
1624     if (expected && !actual) {
1625       if (_verbose) {
1626         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1627                                "expected: %s, actual: %s",
1628                                hr->hrm_index(),
1629                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1630       }
1631       failures += 1;
1632     }
1633 
1634     // Verify that the card bit maps for the cards spanned by the current
1635     // region match. We have an error if we have a set bit in the expected
1636     // bit map and the corresponding bit in the actual bitmap is not set.
1637 
1638     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1639     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1640 
1641     for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
1642       expected = _exp_card_bm->at(i);
1643       actual = _card_bm->at(i);
1644 
1645       if (expected && !actual) {
1646         if (_verbose) {
1647           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1648                                  "expected: %s, actual: %s",
1649                                  hr->hrm_index(), i,
1650                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1651         }
1652         failures += 1;
1653       }
1654     }
1655 
1656     if (failures > 0 && _verbose)  {
1657       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1658                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1659                              HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
1660                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1661     }
1662 
1663     _failures += failures;
1664 
1665     // We could stop iteration over the heap when we
1666     // find the first violating region by returning true.
1667     return false;
1668   }
1669 };
1670 
1671 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1672 protected:
1673   G1CollectedHeap* _g1h;
1674   ConcurrentMark* _cm;
1675   BitMap* _actual_region_bm;
1676   BitMap* _actual_card_bm;
1677 
1678   uint    _n_workers;
1679 
1680   BitMap* _expected_region_bm;
1681   BitMap* _expected_card_bm;
1682 
1683   int  _failures;
1684   bool _verbose;
1685 
1686   HeapRegionClaimer _hrclaimer;
1687 
1688 public:
1689   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1690                             BitMap* region_bm, BitMap* card_bm,
1691                             BitMap* expected_region_bm, BitMap* expected_card_bm)
1692     : AbstractGangTask("G1 verify final counting"),
1693       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1694       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1695       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1696       _failures(0), _verbose(false),
1697       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1698     assert(VerifyDuringGC, "don't call this otherwise");
1699     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1700     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1701 
1702     _verbose = _cm->verbose_medium();
1703   }
1704 
1705   void work(uint worker_id) {
1706     assert(worker_id < _n_workers, "invariant");
1707 
1708     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1709                                             _actual_region_bm, _actual_card_bm,
1710                                             _expected_region_bm,
1711                                             _expected_card_bm,
1712                                             _verbose);
1713 
1714     _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);
1715 
1716     Atomic::add(verify_cl.failures(), &_failures);
1717   }
1718 
1719   int failures() const { return _failures; }
1720 };
1721 
1722 // Closure that finalizes the liveness counting data.
1723 // Used during the cleanup pause.
1724 // Sets the bits corresponding to the interval [NTAMS, top)
1725 // (which contains the implicitly live objects) in the
1726 // card liveness bitmap. Also sets the bit, in the region
1727 // liveness bitmap, for each region containing live data.
1728 
1729 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1730  public:
1731   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1732                               BitMap* region_bm,
1733                               BitMap* card_bm) :
1734     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1735 
1736   bool doHeapRegion(HeapRegion* hr) {
1737 
1738     if (hr->is_continues_humongous()) {
1739       // We will ignore these here and process them when their
1740       // associated "starts humongous" region is processed (see
1741       // set_bit_for_region()). Note that we cannot rely on their
1742       // associated "starts humongous" region to have its bit set to
1743       // 1 since, due to the region chunking in the parallel region
1744       // iteration, a "continues humongous" region might be visited
1745       // before its associated "starts humongous".
1746       return false;
1747     }
1748 
1749     HeapWord* ntams = hr->next_top_at_mark_start();
1750     HeapWord* top   = hr->top();
1751 
1752     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1753 
1754     // Mark the allocated-since-marking portion...
1755     if (ntams < top) {
1756       // This definitely means the region has live objects.
1757       set_bit_for_region(hr);
1758 
1759       // Now set the bits in the card bitmap for [ntams, top)
1760       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1761       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1762 
1763       // Note: if we're looking at the last region in the heap, top
1764       // could actually be just beyond the end of the heap; end_idx
1765       // will then correspond to a (non-existent) card that is also
1766       // just beyond the heap.
1767       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1768         // top is not card aligned - increment end_idx to cover
1769         // all the cards spanned by the [ntams, top) range
1770         end_idx += 1;
1771       }
1772 
1773       assert(end_idx <= _card_bm->size(),
1774              err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1775                      end_idx, _card_bm->size()));
1776       assert(start_idx < _card_bm->size(),
1777              err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1778                      start_idx, _card_bm->size()));
1779 
1780       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1781     }
1782 
1783     // Set the bit for the region if it contains live data
1784     if (hr->next_marked_bytes() > 0) {
1785       set_bit_for_region(hr);
1786     }
1787 
1788     return false;
1789   }
1790 };
1791 
1792 class G1ParFinalCountTask: public AbstractGangTask {
1793 protected:
1794   G1CollectedHeap* _g1h;
1795   ConcurrentMark* _cm;
1796   BitMap* _actual_region_bm;
1797   BitMap* _actual_card_bm;
1798 
1799   uint    _n_workers;
1800   HeapRegionClaimer _hrclaimer;
1801 
1802 public:
1803   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1804     : AbstractGangTask("G1 final counting"),
1805       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1806       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1807       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1808   }
1809 
1810   void work(uint worker_id) {
1811     assert(worker_id < _n_workers, "invariant");
1812 
1813     FinalCountDataUpdateClosure final_update_cl(_g1h,
1814                                                 _actual_region_bm,
1815                                                 _actual_card_bm);
1816 
1817     _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
1818   }
1819 };
1820 
1821 class G1ParNoteEndTask;
1822 
1823 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1824   G1CollectedHeap* _g1;
1825   size_t _max_live_bytes;
1826   uint _regions_claimed;
1827   size_t _freed_bytes;
1828   FreeRegionList* _local_cleanup_list;
1829   HeapRegionSetCount _old_regions_removed;
1830   HeapRegionSetCount _humongous_regions_removed;
1831   HRRSCleanupTask* _hrrs_cleanup_task;
1832   double _claimed_region_time;
1833   double _max_region_time;
1834 
1835 public:
1836   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1837                              FreeRegionList* local_cleanup_list,
1838                              HRRSCleanupTask* hrrs_cleanup_task) :
1839     _g1(g1),
1840     _max_live_bytes(0), _regions_claimed(0),
1841     _freed_bytes(0),
1842     _claimed_region_time(0.0), _max_region_time(0.0),
1843     _local_cleanup_list(local_cleanup_list),
1844     _old_regions_removed(),
1845     _humongous_regions_removed(),
1846     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1847 
1848   size_t freed_bytes() { return _freed_bytes; }
1849   const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1850   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1851 
1852   bool doHeapRegion(HeapRegion *hr) {
1853     if (hr->is_continues_humongous()) {
1854       return false;
1855     }
1856     // Regions are claimed via the HeapRegionClaimer passed to
1857     // heap_region_par_iterate(), so no explicit claim values are involved.
1858     _g1->reset_gc_time_stamps(hr);
1859     double start = os::elapsedTime();
1860     _regions_claimed++;
1861     hr->note_end_of_marking();
1862     _max_live_bytes += hr->max_live_bytes();
1863 
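         // Free the region only if it is completely garbage: it has
         // allocated bytes but no live bytes, and it is not young
         // (young regions are reclaimed by evacuation pauses instead).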
1864     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1865       _freed_bytes += hr->used();
1866       hr->set_containing_set(NULL);
1867       if (hr->is_humongous()) {
1868         assert(hr->is_starts_humongous(), "we should only see starts humongous");
1869         _humongous_regions_removed.increment(1u, hr->capacity());
1870         _g1->free_humongous_region(hr, _local_cleanup_list, true /* par */);
1871       } else {
1872         _old_regions_removed.increment(1u, hr->capacity());
1873         _g1->free_region(hr, _local_cleanup_list, true /* par */);
1874       }
1875     } else {
1876       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1877     }
1878 
1879     double region_time = (os::elapsedTime() - start);
1880     _claimed_region_time += region_time;
1881     if (region_time > _max_region_time) {
1882       _max_region_time = region_time;
1883     }
1884     return false;
1885   }
1886 
1887   size_t max_live_bytes() { return _max_live_bytes; }
1888   uint regions_claimed() { return _regions_claimed; }
1889   double claimed_region_time_sec() { return _claimed_region_time; }
1890   double max_region_time_sec() { return _max_region_time; }
1891 };
1892 
1893 class G1ParNoteEndTask: public AbstractGangTask {
1894   friend class G1NoteEndOfConcMarkClosure;
1895 
1896 protected:
1897   G1CollectedHeap* _g1h;
1898   size_t _max_live_bytes;
1899   size_t _freed_bytes;
1900   FreeRegionList* _cleanup_list;
1901   HeapRegionClaimer _hrclaimer;
1902 
1903 public:
1904   G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1905       AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
1906   }
1907 
1908   void work(uint worker_id) {
1909     FreeRegionList local_cleanup_list("Local Cleanup List");
1910     HRRSCleanupTask hrrs_cleanup_task;
1911     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1912                                            &hrrs_cleanup_task);
1913     _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
1914     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1915 
1916     // Now update the lists
1917     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1918     {
1919       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1920       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1921       _max_live_bytes += g1_note_end.max_live_bytes();
1922       _freed_bytes += g1_note_end.freed_bytes();
1923 
1924       // If we iterated over the global cleanup list at the end of
1925       // cleanup to do this printing, we could not guarantee to only
1926       // generate output for the newly-reclaimed regions (the list
1927       // might not be empty at the beginning of cleanup; we might
1928       // still be working on its previous contents). So we do the
1929       // printing here, before we append the new regions to the global
1930       // cleanup list.
1931 
1932       G1HRPrinter* hr_printer = _g1h->hr_printer();
1933       if (hr_printer->is_active()) {
1934         FreeRegionListIterator iter(&local_cleanup_list);
1935         while (iter.more_available()) {
1936           HeapRegion* hr = iter.get_next();
1937           hr_printer->cleanup(hr);
1938         }
1939       }
1940 
1941       _cleanup_list->add_ordered(&local_cleanup_list);
1942       assert(local_cleanup_list.is_empty(), "post-condition");
1943 
1944       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1945     }
1946   }
1947   size_t max_live_bytes() { return _max_live_bytes; }
1948   size_t freed_bytes() { return _freed_bytes; }
1949 };
1950 
1951 class G1ParScrubRemSetTask: public AbstractGangTask {
1952 protected:
1953   G1RemSet* _g1rs;
1954   BitMap* _region_bm;
1955   BitMap* _card_bm;
1956   HeapRegionClaimer _hrclaimer;
1957 
1958 public:
1959   G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
1960       AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) {
1961   }
1962 
1963   void work(uint worker_id) {
1964     _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer);
1965   }
1966 
1967 };
1968 
1969 void ConcurrentMark::cleanup() {
1970   // world is stopped at this checkpoint
1971   assert(SafepointSynchronize::is_at_safepoint(),
1972          "world should be stopped");
1973   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1974 
1975   // If a full collection has happened, we shouldn't do this.
1976   if (has_aborted()) {
1977     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1978     return;
1979   }
1980 
1981   g1h->verify_region_sets_optional();
1982 
1983   if (VerifyDuringGC) {
1984     HandleMark hm;  // handle scope
1985     g1h->prepare_for_verify();
1986     Universe::verify(VerifyOption_G1UsePrevMarking,
1987                      " VerifyDuringGC:(before)");
1988   }
1989   g1h->check_bitmaps("Cleanup Start");
1990 
1991   G1CollectorPolicy* g1p = g1h->g1_policy();
1992   g1p->record_concurrent_mark_cleanup_start();
1993 
1994   double start = os::elapsedTime();
1995 
1996   HeapRegionRemSet::reset_for_cleanup_tasks();
1997 
1998   // Do counting once more with the world stopped for good measure.
1999   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
2000 
2001   g1h->set_par_threads();
2002   uint n_workers = g1h->workers()->active_workers();
2003   g1h->workers()->run_task(&g1_par_count_task);
2004   // Done with the parallel phase so reset to 0.
2005   g1h->set_par_threads(0);
2006 
2007   if (VerifyDuringGC) {
2008     // Verify that the counting data accumulated during marking matches
2009     // that calculated by walking the marking bitmap.
2010 
2011     // Bitmaps to hold expected values
2012     BitMap expected_region_bm(_region_bm.size(), true);
2013     BitMap expected_card_bm(_card_bm.size(), true);
2014 
2015     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2016                                                  &_region_bm,
2017                                                  &_card_bm,
2018                                                  &expected_region_bm,
2019                                                  &expected_card_bm);
2020 
2021     g1h->set_par_threads((int)n_workers);
2022     g1h->workers()->run_task(&g1_par_verify_task);
2023     // Done with the parallel phase so reset to 0.
2024     g1h->set_par_threads(0);
2025 
2026     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2027   }
2028 
2029   size_t start_used_bytes = g1h->used();
2030   g1h->set_marking_complete();
2031 
2032   double count_end = os::elapsedTime();
2033   double this_final_counting_time = (count_end - start);
2034   _total_counting_time += this_final_counting_time;
2035 
2036   if (G1PrintRegionLivenessInfo) {
2037     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2038     g1h->heap_region_iterate(&cl);
2039   }
2040 
2041   // Install newly created mark bitMap as "prev".
2042   swapMarkBitMaps();
2043 
2044   g1h->reset_gc_time_stamp();
2045 
2046   // Note end of marking in all heap regions.
2047   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
2048   g1h->set_par_threads((int)n_workers);
2049   g1h->workers()->run_task(&g1_par_note_end_task);
2050   g1h->set_par_threads(0);
2051   g1h->check_gc_time_stamps();
2052 
2053   if (!cleanup_list_is_empty()) {
2054     // The cleanup list is not empty, so we'll have to process it
2055     // concurrently. Notify anyone else that might be wanting free
2056     // regions that there will be more free regions coming soon.
2057     g1h->set_free_regions_coming();
2058   }
2059 
2060   // Scrub the remembered sets. It is important to do this before the
2061   // record_concurrent_mark_cleanup_end() call below, since scrubbing
       // affects the metric by which we sort the heap regions.
2062   if (G1ScrubRemSets) {
2063     double rs_scrub_start = os::elapsedTime();
2064     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
2065     g1h->set_par_threads((int)n_workers);
2066     g1h->workers()->run_task(&g1_par_scrub_rs_task);
2067     g1h->set_par_threads(0);
2068 
2069     double rs_scrub_end = os::elapsedTime();
2070     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2071     _total_rs_scrub_time += this_rs_scrub_time;
2072   }
2073 
2074   // This will also free any regions totally full of garbage objects,
2075   // and sort the regions.
2076   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2077 
2078   // Statistics.
2079   double end = os::elapsedTime();
2080   _cleanup_times.add((end - start) * 1000.0);
2081 
2082   if (G1Log::fine()) {
2083     g1h->g1_policy()->print_heap_transition(start_used_bytes);
2084   }
2085 
2086   // Clean up will have freed any regions completely full of garbage.
2087   // Update the soft reference policy with the new heap occupancy.
2088   Universe::update_heap_info_at_gc();
2089 
2090   if (VerifyDuringGC) {
2091     HandleMark hm;  // handle scope
2092     g1h->prepare_for_verify();
2093     Universe::verify(VerifyOption_G1UsePrevMarking,
2094                      " VerifyDuringGC:(after)");
2095   }
2096 
2097   g1h->check_bitmaps("Cleanup End");
2098 
2099   g1h->verify_region_sets_optional();
2100 
2101   // We need to make this be a "collection" so any collection pause that
2102   // races with it goes around and waits for completeCleanup to finish.
2103   g1h->increment_total_collections();
2104 
2105   // Clean out dead classes and update Metaspace sizes.
2106   if (ClassUnloadingWithConcurrentMark) {
2107     ClassLoaderDataGraph::purge();
2108   }
2109   MetaspaceGC::compute_new_size();
2110 
2111   // We reclaimed old regions so we should calculate the sizes to make
2112   // sure we update the old gen/space data.
2113   g1h->g1mm()->update_sizes();
2114   g1h->allocation_context_stats().update_after_mark();
2115 
2116   g1h->trace_heap_after_concurrent_cycle();
2117 }
2118 
2119 void ConcurrentMark::completeCleanup() {
2120   if (has_aborted()) return;
2121 
2122   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2123 
2124   _cleanup_list.verify_optional();
2125   FreeRegionList tmp_free_list("Tmp Free List");
2126 
2127   if (G1ConcRegionFreeingVerbose) {
2128     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2129                            "cleanup list has %u entries",
2130                            _cleanup_list.length());
2131   }
2132 
2133   // No one else should be accessing the _cleanup_list at this point,
2134   // so it is not necessary to take any locks
2135   while (!_cleanup_list.is_empty()) {
2136     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2137     assert(hr != NULL, "Got NULL from a non-empty list");
2138     hr->par_clear();
2139     tmp_free_list.add_ordered(hr);
2140 
2141     // Instead of adding one region at a time to the secondary_free_list,
2142     // we accumulate them in the local list and move them a few at a
2143     // time. This also cuts down on the number of notify_all() calls
2144     // we do during this process. We'll also append the local list when
2145     // _cleanup_list is empty (which means we just removed the last
2146     // region from the _cleanup_list).
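          // For example, if G1SecondaryFreeListAppendLength is 5,
          // regions are handed over in batches of 5, plus one final
          // (possibly shorter) batch when the cleanup list drains.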
2147     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2148         _cleanup_list.is_empty()) {
2149       if (G1ConcRegionFreeingVerbose) {
2150         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2151                                "appending %u entries to the secondary_free_list, "
2152                                "cleanup list still has %u entries",
2153                                tmp_free_list.length(),
2154                                _cleanup_list.length());
2155       }
2156 
2157       {
2158         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2159         g1h->secondary_free_list_add(&tmp_free_list);
2160         SecondaryFreeList_lock->notify_all();
2161       }
2162 #ifndef PRODUCT
2163       if (G1StressConcRegionFreeing) {
2164         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2165           os::sleep(Thread::current(), (jlong) 1, false);
2166         }
2167       }
2168 #endif
2169     }
2170   }
2171   assert(tmp_free_list.is_empty(), "post-condition");
2172 }
2173 
2174 // Supporting Object and Oop closures for reference discovery
2175 // and processing during marking
2176 
2177 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2178   HeapWord* addr = (HeapWord*)obj;
2179   return addr != NULL &&
2180          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2181 }
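     
     // In other words (paraphrasing the predicate above): everything
     // outside the G1 reserved heap is treated as live; inside the
     // heap, an object is live unless it is "ill", i.e. it was
     // allocated before the current marking cycle started and has not
     // (yet) been marked.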
2182 
2183 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2184 // Uses the CMTask associated with a worker thread (for serial reference
2185 // processing the CMTask for worker 0 is used) to preserve (mark) and
2186 // trace referent objects.
2187 //
2188 // Using the CMTask and embedded local queues avoids having the worker
2189 // threads operating on the global mark stack. This reduces the risk
2190 // of overflowing the stack - which we would rather avoid at this late
2191 // stage. Also, using the tasks' local queues removes the potential
2192 // for the workers to interfere with each other, which could occur if
2193 // they operated on the global stack.
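     //
     // As an illustration of the cadence (hypothetical numbers): with
     // G1RefProcDrainInterval set to 10, every 10th call to
     // do_oop_work() below drains the entries accumulated so far, by
     // looping on CMTask::do_marking_step(), before further referents
     // are handled.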
2194 
2195 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2196   ConcurrentMark* _cm;
2197   CMTask*         _task;
2198   int             _ref_counter_limit;
2199   int             _ref_counter;
2200   bool            _is_serial;
2201  public:
2202   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2203     _cm(cm), _task(task), _is_serial(is_serial),
2204     _ref_counter_limit(G1RefProcDrainInterval) {
2205     assert(_ref_counter_limit > 0, "sanity");
2206     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2207     _ref_counter = _ref_counter_limit;
2208   }
2209 
2210   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2211   virtual void do_oop(      oop* p) { do_oop_work(p); }
2212 
2213   template <class T> void do_oop_work(T* p) {
2214     if (!_cm->has_overflown()) {
2215       oop obj = oopDesc::load_decode_heap_oop(p);
2216       if (_cm->verbose_high()) {
2217         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2218                                "*"PTR_FORMAT" = "PTR_FORMAT,
2219                                _task->worker_id(), p2i(p), p2i((void*) obj));
2220       }
2221 
2222       _task->deal_with_reference(obj);
2223       _ref_counter--;
2224 
2225       if (_ref_counter == 0) {
2226         // We have dealt with _ref_counter_limit references, pushing them
2227         // and objects reachable from them on to the local stack (and
2228         // possibly the global stack). Call CMTask::do_marking_step() to
2229         // process these entries.
2230         //
2231         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2232         // there's nothing more to do (i.e. we're done with the entries that
2233         // were pushed as a result of the CMTask::deal_with_reference() calls
2234         // above) or we overflow.
2235         //
2236         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2237         // flag while there may still be some work to do. (See the comment at
2238         // the beginning of CMTask::do_marking_step() for those conditions -
2239         // one of which is reaching the specified time target.) It is only
2240         // when CMTask::do_marking_step() returns without setting the
2241         // has_aborted() flag that the marking step has completed.
2242         do {
2243           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2244           _task->do_marking_step(mark_step_duration_ms,
2245                                  false      /* do_termination */,
2246                                  _is_serial);
2247         } while (_task->has_aborted() && !_cm->has_overflown());
2248         _ref_counter = _ref_counter_limit;
2249       }
2250     } else {
2251       if (_cm->verbose_high()) {
2252          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2253       }
2254     }
2255   }
2256 };
2257 
2258 // 'Drain' oop closure used by both serial and parallel reference processing.
2259 // Uses the CMTask associated with a given worker thread (for serial
2260 // reference processing the CMTask for worker 0 is used). Calls the
2261 // do_marking_step routine, with an unbelievably large timeout value,
2262 // to drain the marking data structures of the remaining entries
2263 // added by the 'keep alive' oop closure above.
2264 
2265 class G1CMDrainMarkingStackClosure: public VoidClosure {
2266   ConcurrentMark* _cm;
2267   CMTask*         _task;
2268   bool            _is_serial;
2269  public:
2270   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2271     _cm(cm), _task(task), _is_serial(is_serial) {
2272     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2273   }
2274 
2275   void do_void() {
2276     do {
2277       if (_cm->verbose_high()) {
2278         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2279                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2280       }
2281 
2282       // We call CMTask::do_marking_step() to completely drain the local
2283       // and global marking stacks of entries pushed by the 'keep alive'
2284       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2285       //
2286       // CMTask::do_marking_step() is called in a loop, which we'll exit
2287       // if there's nothing more to do (i.e. we've completely drained the
2288       // entries that were pushed as a result of applying the 'keep alive'
2289       // closure to the entries on the discovered ref lists) or we overflow
2290       // the global marking stack.
2291       //
2292       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2293       // flag while there may still be some work to do. (See the comment at
2294       // the beginning of CMTask::do_marking_step() for those conditions -
2295       // one of which is reaching the specified time target.) It is only
2296       // when CMTask::do_marking_step() returns without setting the
2297       // has_aborted() flag that the marking step has completed.
2298 
2299       _task->do_marking_step(1000000000.0 /* something very large */,
2300                              true         /* do_termination */,
2301                              _is_serial);
2302     } while (_task->has_aborted() && !_cm->has_overflown());
2303   }
2304 };
2305 
2306 // Implementation of AbstractRefProcTaskExecutor for parallel
2307 // reference processing at the end of G1 concurrent marking
2308 
2309 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2310 private:
2311   G1CollectedHeap* _g1h;
2312   ConcurrentMark*  _cm;
2313   WorkGang*        _workers;
2314   uint             _active_workers;
2315 
2316 public:
2317   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2318                           ConcurrentMark* cm,
2319                           WorkGang* workers,
2320                           uint n_workers) :
2321     _g1h(g1h), _cm(cm),
2322     _workers(workers), _active_workers(n_workers) { }
2323 
2324   // Executes the given task using concurrent marking worker threads.
2325   virtual void execute(ProcessTask& task);
2326   virtual void execute(EnqueueTask& task);
2327 };
2328 
2329 class G1CMRefProcTaskProxy: public AbstractGangTask {
2330   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2331   ProcessTask&     _proc_task;
2332   G1CollectedHeap* _g1h;
2333   ConcurrentMark*  _cm;
2334 
2335 public:
2336   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2337                      G1CollectedHeap* g1h,
2338                      ConcurrentMark* cm) :
2339     AbstractGangTask("Process reference objects in parallel"),
2340     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2341     ReferenceProcessor* rp = _g1h->ref_processor_cm();
2342     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2343   }
2344 
2345   virtual void work(uint worker_id) {
2346     ResourceMark rm;
2347     HandleMark hm;
2348     CMTask* task = _cm->task(worker_id);
2349     G1CMIsAliveClosure g1_is_alive(_g1h);
2350     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2351     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2352 
2353     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2354   }
2355 };
2356 
2357 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2358   assert(_workers != NULL, "Need parallel worker threads.");
2359   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2360 
2361   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2362 
2363   // We need to reset the concurrency level before each
2364   // proxy task execution, so that the termination protocol
2365   // and overflow handling in CMTask::do_marking_step() knows
2366   // how many workers to wait for.
2367   _cm->set_concurrency(_active_workers);
2368   _g1h->set_par_threads(_active_workers);
2369   _workers->run_task(&proc_task_proxy);
2370   _g1h->set_par_threads(0);
2371 }
2372 
2373 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2374   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2375   EnqueueTask& _enq_task;
2376 
2377 public:
2378   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2379     AbstractGangTask("Enqueue reference objects in parallel"),
2380     _enq_task(enq_task) { }
2381 
2382   virtual void work(uint worker_id) {
2383     _enq_task.work(worker_id);
2384   }
2385 };
2386 
2387 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2388   assert(_workers != NULL, "Need parallel worker threads.");
2389   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2390 
2391   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2392 
2393   // Not strictly necessary but...
2394   //
2395   // We need to reset the concurrency level before each
2396   // proxy task execution, so that the termination protocol
2397   // and overflow handling in CMTask::do_marking_step() knows
2398   // how many workers to wait for.
2399   _cm->set_concurrency(_active_workers);
2400   _g1h->set_par_threads(_active_workers);
2401   _workers->run_task(&enq_task_proxy);
2402   _g1h->set_par_threads(0);
2403 }
2404 
2405 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2406   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2407 }
2408 
2409 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2410   if (has_overflown()) {
2411     // Skip processing the discovered references if we have
2412     // overflown the global marking stack. Reference objects
2413     // only get discovered once so it is OK to not
2414     // de-populate the discovered reference lists. We could have,
2415     // but the only benefit would be that, when marking restarts,
2416     // fewer reference objects are discovered.
2417     return;
2418   }
2419 
2420   ResourceMark rm;
2421   HandleMark   hm;
2422 
2423   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2424 
2425   // Is alive closure.
2426   G1CMIsAliveClosure g1_is_alive(g1h);
2427 
2428   // Inner scope to exclude the cleaning of the string and symbol
2429   // tables from the displayed time.
2430   {
2431     G1CMTraceTime t("GC ref-proc", G1Log::finer());
2432 
2433     ReferenceProcessor* rp = g1h->ref_processor_cm();
2434 
2435     // See the comment in G1CollectedHeap::ref_processing_init()
2436     // about how reference processing currently works in G1.
2437 
2438     // Set the soft reference policy
2439     rp->setup_policy(clear_all_soft_refs);
2440     assert(_markStack.isEmpty(), "mark stack should be empty");
2441 
2442     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2443     // in serial reference processing. Note these closures are also
2444     // used for serially processing (by the current thread) the
2445     // JNI references during parallel reference processing.
2446     //
2447     // These closures do not need to synchronize with the worker
2448     // threads involved in parallel reference processing as these
2449     // instances are executed serially by the current thread (i.e.,
2450     // when reference processing is not multi-threaded it is
2451     // performed by the current thread instead of a gang worker).
2452     //
2453     // The gang tasks involved in parallel reference processing create
2454     // their own instances of these closures, which do their own
2455     // synchronization among themselves.
2456     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2457     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2458 
2459     // We need at least one active thread. If reference processing
2460     // is not multi-threaded we use the current (VMThread) thread,
2461     // otherwise we use the work gang from the G1CollectedHeap and
2462     // we utilize all the worker threads we can.
2463     bool processing_is_mt = rp->processing_is_mt();
2464     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2465     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2466 
2467     // Parallel processing task executor.
2468     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2469                                               g1h->workers(), active_workers);
2470     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2471 
2472     // Set the concurrency level. The phase was already set prior to
2473     // executing the remark task.
2474     set_concurrency(active_workers);
2475 
2476     // Set the degree of MT processing here.  If the discovery was done MT,
2477     // the number of threads involved during discovery could differ from
2478     // the number of active workers.  This is OK as long as the discovered
2479     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2480     rp->set_active_mt_degree(active_workers);
2481 
2482     // Process the weak references.
2483     const ReferenceProcessorStats& stats =
2484         rp->process_discovered_references(&g1_is_alive,
2485                                           &g1_keep_alive,
2486                                           &g1_drain_mark_stack,
2487                                           executor,
2488                                           g1h->gc_timer_cm(),
2489                                           concurrent_gc_id());
2490     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2491 
2492     // The do_oop work routines of the keep_alive and drain_marking_stack
2493     // oop closures will set the has_overflown flag if we overflow the
2494     // global marking stack.
2495 
2496     assert(_markStack.overflow() || _markStack.isEmpty(),
2497             "mark stack should be empty (unless it overflowed)");
2498 
2499     if (_markStack.overflow()) {
2500       // This should have been done already when we tried to push an
2501       // entry on to the global mark stack. But let's do it again.
2502       set_has_overflown();
2503     }
2504 
2505     assert(rp->num_q() == active_workers, "Reference queue count should match the number of active workers");
2506 
2507     rp->enqueue_discovered_references(executor);
2508 
2509     rp->verify_no_references_recorded();
2510     assert(!rp->discovery_enabled(), "Post condition");
2511   }
2512 
2513   if (has_overflown()) {
2514     // We cannot trust g1_is_alive if the marking stack overflowed.
2515     return;
2516   }
2517 
2518   assert(_markStack.isEmpty(), "Marking should have completed");
2519 
2520   // Unload Klasses, String, Symbols, Code Cache, etc.
2521   {
2522     G1CMTraceTime trace("Unloading", G1Log::finer());
2523 
2524     if (ClassUnloadingWithConcurrentMark) {
2525       bool purged_classes;
2526 
2527       {
2528         G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
2529         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2530       }
2531 
2532       {
2533         G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
2534         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2535       }
2536     }
2537 
2538     if (G1StringDedup::is_enabled()) {
2539       G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
2540       G1StringDedup::unlink(&g1_is_alive);
2541     }
2542   }
2543 }
2544 
2545 void ConcurrentMark::swapMarkBitMaps() {
2546   CMBitMapRO* temp = _prevMarkBitMap;
2547   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2548   _nextMarkBitMap  = (CMBitMap*)  temp;
2549 }
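     
     // After the swap, the bitmap populated by the cycle that just
     // completed serves as the "prev" bitmap, answering liveness
     // queries until the next cycle finishes, while the old "prev"
     // bitmap will be cleared and reused as the "next" bitmap.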
2550 
2551 // Closure for marking entries in SATB buffers.
2552 class CMSATBBufferClosure : public SATBBufferClosure {
2553 private:
2554   CMTask* _task;
2555   G1CollectedHeap* _g1h;
2556 
2557   // This is very similar to CMTask::deal_with_reference, but with
2558   // more relaxed requirements for the argument, so this must be more
2559   // circumspect about treating the argument as an object.
2560   void do_entry(void* entry) const {
2561     _task->increment_refs_reached();
2562     HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
2563     if (entry < hr->next_top_at_mark_start()) {
2564       // Until we get here, we don't know whether entry refers to a valid
2565       // object; it could instead have been a stale reference.
2566       oop obj = static_cast<oop>(entry);
2567       assert(obj->is_oop(true /* ignore mark word */),
2568              err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
2569       _task->make_reference_grey(obj, hr);
2570     }
2571   }
2572 
2573 public:
2574   CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
2575     : _task(task), _g1h(g1h) { }
2576 
2577   virtual void do_buffer(void** buffer, size_t size) {
2578     for (size_t i = 0; i < size; ++i) {
2579       do_entry(buffer[i]);
2580     }
2581   }
2582 };
2583 
2584 class G1RemarkThreadsClosure : public ThreadClosure {
2585   CMSATBBufferClosure _cm_satb_cl;
2586   G1CMOopClosure _cm_cl;
2587   MarkingCodeBlobClosure _code_cl;
2588   int _thread_parity;
2589 
2590  public:
2591   G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
2592     _cm_satb_cl(task, g1h),
2593     _cm_cl(g1h, g1h->concurrent_mark(), task),
2594     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2595     _thread_parity(Threads::thread_claim_parity()) {}
2596 
2597   void do_thread(Thread* thread) {
2598     if (thread->is_Java_thread()) {
2599       if (thread->claim_oops_do(true, _thread_parity)) {
2600         JavaThread* jt = (JavaThread*)thread;
2601 
2602         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2603         // however, oops reachable from nmethods have very complex lifecycles:
2604         // * Alive if on the stack of an executing method
2605         // * Weakly reachable otherwise
2606         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2607         // kept live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2608         jt->nmethods_do(&_code_cl);
2609 
2610         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
2611       }
2612     } else if (thread->is_VM_thread()) {
2613       if (thread->claim_oops_do(true, _thread_parity)) {
2614         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
2615       }
2616     }
2617   }
2618 };
2619 
2620 class CMRemarkTask: public AbstractGangTask {
2621 private:
2622   ConcurrentMark* _cm;
2623 public:
2624   void work(uint worker_id) {
2625     // Since all available tasks are actually started, we should
2626     // only proceed if we're supposed to be active.
2627     if (worker_id < _cm->active_tasks()) {
2628       CMTask* task = _cm->task(worker_id);
2629       task->record_start_time();
2630       {
2631         ResourceMark rm;
2632         HandleMark hm;
2633 
2634         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2635         Threads::threads_do(&threads_f);
2636       }
2637 
2638       do {
2639         task->do_marking_step(1000000000.0 /* something very large */,
2640                               true         /* do_termination       */,
2641                               false        /* is_serial            */);
2642       } while (task->has_aborted() && !_cm->has_overflown());
2643       // If we overflow, then we do not want to restart. We instead
2644       // want to abort remark and do concurrent marking again.
2645       task->record_end_time();
2646     }
2647   }
2648 
2649   CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
2650     AbstractGangTask("Par Remark"), _cm(cm) {
2651     _cm->terminator()->reset_for_reuse(active_workers);
2652   }
2653 };
2654 
2655 void ConcurrentMark::checkpointRootsFinalWork() {
2656   ResourceMark rm;
2657   HandleMark   hm;
2658   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2659 
2660   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2661 
2662   g1h->ensure_parsability(false);
2663 
2664   // This is the remark pause, so we use all active worker threads.
2665   uint active_workers = g1h->workers()->active_workers();
2666   if (active_workers == 0) {
2667     assert(active_workers > 0, "Should have been set earlier");
2668     active_workers = (uint) ParallelGCThreads;
2669     g1h->workers()->set_active_workers(active_workers);
2670   }
2671   set_concurrency_and_phase(active_workers, false /* concurrent */);
2672   // Leave _parallel_marking_threads at its
2673   // value originally calculated in the ConcurrentMark
2674   // constructor and pass values of the active workers
2675   // through the gang in the task.
2676 
2677   {
2678     StrongRootsScope srs(active_workers);
2679 
2680     CMRemarkTask remarkTask(this, active_workers);
2681     // We will start all available threads, even if we decide that the
2682     // active_workers will be fewer. The extra ones will just bail out
2683     // immediately.
2684     g1h->set_par_threads(active_workers);
2685     g1h->workers()->run_task(&remarkTask);
2686     g1h->set_par_threads(0);
2687   }
2688 
2689   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2690   guarantee(has_overflown() ||
2691             satb_mq_set.completed_buffers_num() == 0,
2692             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2693                     BOOL_TO_STR(has_overflown()),
2694                     satb_mq_set.completed_buffers_num()));
2695 
2696   print_stats();
2697 }
2698 
2699 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2700   // Note we are overriding the read-only view of the prev map here, via
2701   // the cast.
2702   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2703 }
2704 
2705 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2706   _nextMarkBitMap->clearRange(mr);
2707 }
2708 
2709 HeapRegion*
2710 ConcurrentMark::claim_region(uint worker_id) {
2711   // "checkpoint" the finger
2712   HeapWord* finger = _finger;
2713 
2714   // _heap_end will not change underneath our feet; it only changes at
2715   // yield points.
2716   while (finger < _heap_end) {
2717     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2718 
2719     // Note on how this code handles humongous regions. In the
2720     // normal case the finger will reach the start of a "starts
2721     // humongous" (SH) region. Its end will either be the end of the
2722     // last "continues humongous" (CH) region in the sequence, or the
2723     // standard end of the SH region (if the SH is the only region in
2724     // the sequence). That way claim_region() will skip over the CH
2725     // regions. However, there is a subtle race between a CM thread
2726     // executing this method and a mutator thread doing a humongous
2727     // object allocation. The two are not mutually exclusive as the CM
2728     // thread does not need to hold the Heap_lock when it gets
2729     // here. So there is a chance that claim_region() will come across
2730     // a free region that's in the process of becoming a SH or a CH
2731     // region. In the former case, it will either
2732     //   a) Miss the update to the region's end, in which case it will
2733     //      visit every subsequent CH region, will find their bitmaps
2734     //      empty, and do nothing, or
2735     //   b) Will observe the update of the region's end (in which case
2736     //      it will skip the subsequent CH regions).
2737     // If it comes across a region that suddenly becomes CH, the
2738     // scenario will be similar to b). So, the race between
2739     // claim_region() and a humongous object allocation might force us
2740     // to do a bit of unnecessary work (due to some unnecessary bitmap
2741     // iterations) but it should not introduce any correctness issues.
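         //
         // The claim protocol itself, in outline (a sketch of the code
         // that follows, not extra logic):
         //
         //   finger = _finger;                      // snapshot
         //   end    = end of the region containing finger, or
         //            finger + GrainWords if that region is NULL;
         //   if (Atomic::cmpxchg_ptr(end, &_finger, finger) == finger)
         //     we claimed [finger, end); scan it if it corresponds to
         //     a committed, non-empty region;
         //   else
         //     someone else moved the finger - reread it and retry.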
2742     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2743 
2744     // The heap_region_containing_raw() call above may return NULL as we always
2745     // claim until the end of the heap. In this case, just jump to the next region.
2746     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2747 
2748     // Is the gap between reading the finger and doing the CAS too long?
2749     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2750     if (res == finger && curr_region != NULL) {
2751       // we succeeded
2752       HeapWord*   bottom        = curr_region->bottom();
2753       HeapWord*   limit         = curr_region->next_top_at_mark_start();
2754 
2755       if (verbose_low()) {
2756         gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2757                                "["PTR_FORMAT", "PTR_FORMAT"), "
2758                                "limit = "PTR_FORMAT,
2759                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2760       }
2761 
2762       // Notice that _finger == end cannot be guaranteed here since
2763       // someone else might have moved the finger even further.
2764       assert(_finger >= end, "the finger should have moved forward");
2765 
2766       if (verbose_low()) {
2767         gclog_or_tty->print_cr("[%u] we were successful with region = "
2768                                PTR_FORMAT, worker_id, p2i(curr_region));
2769       }
2770 
2771       if (limit > bottom) {
2772         if (verbose_low()) {
2773           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2774                                  "returning it ", worker_id, p2i(curr_region));
2775         }
2776         return curr_region;
2777       } else {
2778         assert(limit == bottom,
2779                "the region limit should be at bottom");
2780         if (verbose_low()) {
2781           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2782                                  "returning NULL", worker_id, p2i(curr_region));
2783         }
2784         // we return NULL and the caller should try calling
2785         // claim_region() again.
2786         return NULL;
2787       }
2788     } else {
2789       assert(_finger > finger, "the finger should have moved forward");
2790       if (verbose_low()) {
2791         if (curr_region == NULL) {
2792           gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
2793                                  "global finger = "PTR_FORMAT", "
2794                                  "our finger = "PTR_FORMAT,
2795                                  worker_id, p2i(_finger), p2i(finger));
2796         } else {
2797           gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2798                                  "global finger = "PTR_FORMAT", "
2799                                  "our finger = "PTR_FORMAT,
2800                                  worker_id, p2i(_finger), p2i(finger));
2801         }
2802       }
2803 
2804       // read it again
2805       finger = _finger;
2806     }
2807   }
2808 
2809   return NULL;
2810 }
2811 
2812 #ifndef PRODUCT
2813 enum VerifyNoCSetOopsPhase {
2814   VerifyNoCSetOopsStack,
2815   VerifyNoCSetOopsQueues
2816 };
2817 
2818 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
2819 private:
2820   G1CollectedHeap* _g1h;
2821   VerifyNoCSetOopsPhase _phase;
2822   int _info;
2823 
2824   const char* phase_str() {
2825     switch (_phase) {
2826     case VerifyNoCSetOopsStack:         return "Stack";
2827     case VerifyNoCSetOopsQueues:        return "Queue";
2828     default:                            ShouldNotReachHere();
2829     }
2830     return NULL;
2831   }
2832 
2833   void do_object_work(oop obj) {
2834     guarantee(!_g1h->obj_in_cs(obj),
2835               err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
2836                       p2i((void*) obj), phase_str(), _info));
2837   }
2838 
2839 public:
2840   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2841 
2842   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
2843     _phase = phase;
2844     _info = info;
2845   }
2846 
2847   virtual void do_oop(oop* p) {
2848     oop obj = oopDesc::load_decode_heap_oop(p);
2849     do_object_work(obj);
2850   }
2851 
2852   virtual void do_oop(narrowOop* p) {
2853     // We should not come across narrow oops while scanning marking
2854     // stacks
2855     ShouldNotReachHere();
2856   }
2857 
2858   virtual void do_object(oop obj) {
2859     do_object_work(obj);
2860   }
2861 };
2862 
2863 void ConcurrentMark::verify_no_cset_oops() {
2864   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2865   if (!G1CollectedHeap::heap()->mark_in_progress()) {
2866     return;
2867   }
2868 
2869   VerifyNoCSetOopsClosure cl;
2870 
2871   // Verify entries on the global mark stack
2872   cl.set_phase(VerifyNoCSetOopsStack);
2873   _markStack.oops_do(&cl);
2874 
2875   // Verify entries on the task queues
2876   for (uint i = 0; i < _max_worker_id; i += 1) {
2877     cl.set_phase(VerifyNoCSetOopsQueues, i);
2878     CMTaskQueue* queue = _task_queues->queue(i);
2879     queue->oops_do(&cl);
2880   }
2881 
2882   // Verify the global finger
2883   HeapWord* global_finger = finger();
2884   if (global_finger != NULL && global_finger < _heap_end) {
2885     // The global finger always points to a heap region boundary. We
2886     // use heap_region_containing_raw() to get the containing region
2887     // given that the global finger could be pointing to a free region
2888     // which subsequently becomes continues humongous. If that
2889     // happens, heap_region_containing() will return the bottom of the
2890     // corresponding starts humongous region and the check below will
2891     // not hold any more.
2892     // Since we always iterate over all regions, we might get a NULL HeapRegion
2893     // here.
2894     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2895     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2896               err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
2897                       p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
2898   }
2899 
2900   // Verify the task fingers
2901   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2902   for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
2903     CMTask* task = _tasks[i];
2904     HeapWord* task_finger = task->finger();
2905     if (task_finger != NULL && task_finger < _heap_end) {
2906       // See above note on the global finger verification.
2907       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2908       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2909                 !task_hr->in_collection_set(),
2910                 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
2911                         p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
2912     }
2913   }
2914 }
2915 #endif // PRODUCT
2916 
2917 // Aggregate the counting data that was constructed concurrently
2918 // with marking.
2919 class AggregateCountDataHRClosure: public HeapRegionClosure {
2920   G1CollectedHeap* _g1h;
2921   ConcurrentMark* _cm;
2922   CardTableModRefBS* _ct_bs;
2923   BitMap* _cm_card_bm;
2924   uint _max_worker_id;
2925 
2926  public:
2927   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2928                               BitMap* cm_card_bm,
2929                               uint max_worker_id) :
2930     _g1h(g1h), _cm(g1h->concurrent_mark()),
2931     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
2932     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2933 
2934   bool doHeapRegion(HeapRegion* hr) {
2935     if (hr->is_continues_humongous()) {
2936       // We will ignore these here and process them when their
2937       // associated "starts humongous" region is processed.
2938       // Note that we cannot rely on their associated
2939       // "starts humongous" region to have their bit set to 1
2940       // since, due to the region chunking in the parallel region
2941       // iteration, a "continues humongous" region might be visited
2942       // before its associated "starts humongous".
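      // Returning false here means "continue the iteration over the
      // remaining regions".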
2943       return false;
2944     }
2945 
2946     HeapWord* start = hr->bottom();
2947     HeapWord* limit = hr->next_top_at_mark_start();
2948     HeapWord* end = hr->end();
2949 
2950     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2951            err_msg("Preconditions not met - "
2952                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
2953                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
2954                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
2955 
2956     assert(hr->next_marked_bytes() == 0, "Precondition");
2957 
2958     if (start == limit) {
2959       // NTAMS of this region has not been set, so there is nothing to do.
2960       return false;
2961     }
2962 
2963     // 'start' should be in the heap.
2964     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2965     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2966     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2967 
2968     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2969     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2970     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2971 
2972     // If ntams is not card aligned then we bump the card bitmap index
2973     // for limit so that we get all the cards spanned by
2974     // the object ending at ntams.
2975     // Note: if this is the last region in the heap then ntams
2976     // could actually be just beyond the end of the heap;
2977     // limit_idx will then correspond to a (non-existent) card
2978     // that is also outside the heap.
2979     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
2980       limit_idx += 1;
2981     }
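    // A worked example of the bump above (illustrative, assuming 512 byte
    // cards): if limit sits 200 bytes past a card boundary, then
    // card_bitmap_index_for(limit) names the card containing those bytes,
    // and since [start_idx, limit_idx) is half-open, the increment is what
    // makes the range cover that final, partially filled card.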
2982 
2983     assert(limit_idx <= end_idx, "or else use atomics");
2984 
2985     // Aggregate the "stripe" in the count data associated with hr.
2986     uint hrm_index = hr->hrm_index();
2987     size_t marked_bytes = 0;
2988 
2989     for (uint i = 0; i < _max_worker_id; i += 1) {
2990       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
2991       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
2992 
2993       // Fetch the marked_bytes in this region for task i and
2994       // add it to the running total for this region.
2995       marked_bytes += marked_bytes_array[hrm_index];
2996 
2997       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
2998       // into the global card bitmap.
2999       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3000 
3001       while (scan_idx < limit_idx) {
3002         assert(task_card_bm->at(scan_idx) == true, "should be");
3003         _cm_card_bm->set_bit(scan_idx);
3004         assert(_cm_card_bm->at(scan_idx) == true, "should be");
3005 
3006         // BitMap::get_next_one_offset() can handle the case when
3007         // its left_offset parameter is greater than its right_offset
3008         // parameter. It does, however, have an early exit if
3009         // left_offset == right_offset. So let's limit the value
3010         // passed in for left offset here.
3011         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3012         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3013       }
3014     }
3015 
3016     // Update the marked bytes for this region.
3017     hr->add_to_marked_bytes(marked_bytes);
3018 
3019     // Next heap region
3020     return false;
3021   }
3022 };
3023 
3024 class G1AggregateCountDataTask: public AbstractGangTask {
3025 protected:
3026   G1CollectedHeap* _g1h;
3027   ConcurrentMark* _cm;
3028   BitMap* _cm_card_bm;
3029   uint _max_worker_id;
3030   uint _active_workers;
3031   HeapRegionClaimer _hrclaimer;
3032 
3033 public:
3034   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3035                            ConcurrentMark* cm,
3036                            BitMap* cm_card_bm,
3037                            uint max_worker_id,
3038                            uint n_workers) :
3039       AbstractGangTask("Count Aggregation"),
3040       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3041       _max_worker_id(max_worker_id),
3042       _active_workers(n_workers),
3043       _hrclaimer(_active_workers) {
3044   }
3045 
3046   void work(uint worker_id) {
3047     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3048 
3049     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
3050   }
3051 };
3052 
3053 
3054 void ConcurrentMark::aggregate_count_data() {
3055   uint n_workers = _g1h->workers()->active_workers();
3056 
3057   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3058                                            _max_worker_id, n_workers);
3059 
3060   _g1h->set_par_threads(n_workers);
3061   _g1h->workers()->run_task(&g1_par_agg_task);
3062   _g1h->set_par_threads(0);
3063 }
3064 
3065 // Clear the per-worker arrays used to store the per-region counting data
3066 void ConcurrentMark::clear_all_count_data() {
3067   // Clear the global card bitmap - it will be filled during
3068   // liveness count aggregation (during remark) and the
3069   // final counting task.
3070   _card_bm.clear();
3071 
3072   // Clear the global region bitmap - it will be filled as part
3073   // of the final counting task.
3074   _region_bm.clear();
3075 
3076   uint max_regions = _g1h->max_regions();
3077   assert(_max_worker_id > 0, "uninitialized");
3078 
3079   for (uint i = 0; i < _max_worker_id; i += 1) {
3080     BitMap* task_card_bm = count_card_bitmap_for(i);
3081     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3082 
3083     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3084     assert(marked_bytes_array != NULL, "uninitialized");
3085 
3086     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3087     task_card_bm->clear();
3088   }
3089 }
3090 
3091 void ConcurrentMark::print_stats() {
3092   if (verbose_stats()) {
3093     gclog_or_tty->print_cr("---------------------------------------------------------------------");
3094     for (size_t i = 0; i < _active_tasks; ++i) {
3095       _tasks[i]->print_stats();
3096       gclog_or_tty->print_cr("---------------------------------------------------------------------");
3097     }
3098   }
3099 }
3100 
3101 // abandon current marking iteration due to a Full GC
3102 void ConcurrentMark::abort() {
3103   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
3104   // concurrent bitmap clearing.
3105   _nextMarkBitMap->clearAll();
3106 
3107   // Note we cannot clear the previous marking bitmap here
3108   // since VerifyDuringGC verifies the objects marked during
3109   // a full GC against the previous bitmap.
3110 
3111   // Clear the liveness counting data
3112   clear_all_count_data();
3113   // Empty mark stack
3114   reset_marking_state();
3115   for (uint i = 0; i < _max_worker_id; ++i) {
3116     _tasks[i]->clear_region_fields();
3117   }
3118   _first_overflow_barrier_sync.abort();
3119   _second_overflow_barrier_sync.abort();
3120   const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
3121   if (!gc_id.is_undefined()) {
3122     // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
3123   // to detect that it was aborted. Only keep track of the first aborted GC id.
3124     _aborted_gc_id = gc_id;
3125   }
3126   _has_aborted = true;
3127 
3128   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3129   satb_mq_set.abandon_partial_marking();
3130   // This can be called either during or outside marking; we'll read
3131   // the expected_active value from the SATB queue set.
3132   satb_mq_set.set_active_all_threads(
3133                                  false, /* new active value */
3134                                  satb_mq_set.is_active() /* expected_active */);
3135 
3136   _g1h->trace_heap_after_concurrent_cycle();
3137   _g1h->register_concurrent_cycle_end();
3138 }
3139 
3140 const GCId& ConcurrentMark::concurrent_gc_id() {
3141   if (has_aborted()) {
3142     return _aborted_gc_id;
3143   }
3144   return _g1h->gc_tracer_cm()->gc_id();
3145 }
3146 
3147 static void print_ms_time_info(const char* prefix, const char* name,
3148                                NumberSeq& ns) {
3149   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3150                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3151   if (ns.num() > 0) {
3152     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3153                            prefix, ns.sd(), ns.maximum());
3154   }
3155 }
3156 
3157 void ConcurrentMark::print_summary_info() {
3158   gclog_or_tty->print_cr(" Concurrent marking:");
3159   print_ms_time_info("  ", "init marks", _init_times);
3160   print_ms_time_info("  ", "remarks", _remark_times);
3161   {
3162     print_ms_time_info("     ", "final marks", _remark_mark_times);
3163     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3164 
3165   }
3166   print_ms_time_info("  ", "cleanups", _cleanup_times);
3167   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
3168                          _total_counting_time,
3169                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3170                           (double)_cleanup_times.num()
3171                          : 0.0));
3172   if (G1ScrubRemSets) {
3173     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3174                            _total_rs_scrub_time,
3175                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3176                             (double)_cleanup_times.num()
3177                            : 0.0));
3178   }
3179   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3180                          (_init_times.sum() + _remark_times.sum() +
3181                           _cleanup_times.sum())/1000.0);
3182   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3183                 "(%8.2f s marking).",
3184                 cmThread()->vtime_accum(),
3185                 cmThread()->vtime_mark_accum());
3186 }
3187 
3188 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3189   _parallel_workers->print_worker_threads_on(st);
3190 }
3191 
3192 void ConcurrentMark::print_on_error(outputStream* st) const {
3193   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3194       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3195   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3196   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3197 }
3198 
3199 // We take a break if someone is trying to stop the world.
3200 bool ConcurrentMark::do_yield_check(uint worker_id) {
3201   if (SuspendibleThreadSet::should_yield()) {
3202     if (worker_id == 0) {
3203       _g1h->g1_policy()->record_concurrent_pause();
3204     }
3205     SuspendibleThreadSet::yield();
3206     return true;
3207   } else {
3208     return false;
3209   }
3210 }
3211 
3212 #ifndef PRODUCT
3213 // for debugging purposes
3214 void ConcurrentMark::print_finger() {
3215   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3216                          p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3217   for (uint i = 0; i < _max_worker_id; ++i) {
3218     gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3219   }
3220   gclog_or_tty->cr();
3221 }
3222 #endif
3223 
3224 template<bool scan>
3225 inline void CMTask::process_grey_object(oop obj) {
3226   assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
3227   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3228 
3229   if (_cm->verbose_high()) {
3230     gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
3231                            _worker_id, p2i((void*) obj));
3232   }
3233 
3234   size_t obj_size = obj->size();
3235   _words_scanned += obj_size;
3236 
3237   if (scan) {
3238     obj->oop_iterate(_cm_oop_closure);
3239   }
3240   statsOnly( ++_objs_scanned );
3241   check_limits();
3242 }
3243 
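// Explicit instantiations of the two variants used by callers: <true>
// also iterates over the object's fields, while <false> only accounts for
// the object's size (callers use it for typeArrays, which contain no
// references that need to be followed).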
3244 template void CMTask::process_grey_object<true>(oop);
3245 template void CMTask::process_grey_object<false>(oop);
3246 
3247 // Closure for iteration over bitmaps
3248 class CMBitMapClosure : public BitMapClosure {
3249 private:
3250   // the bitmap that is being iterated over
3251   CMBitMap*                   _nextMarkBitMap;
3252   ConcurrentMark*             _cm;
3253   CMTask*                     _task;
3254 
3255 public:
3256   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3257     _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { }
3258 
3259   bool do_bit(size_t offset) {
3260     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3261     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3262     assert(addr < _cm->finger(), "invariant");
3263 
3264     statsOnly( _task->increase_objs_found_on_bitmap() );
3265     assert(addr >= _task->finger(), "invariant");
3266 
3267     // We move the task's local finger along.
3268     _task->move_finger_to(addr);
3269 
3270     _task->scan_object(oop(addr));
3271     // we only partially drain the local queue and global stack
3272     _task->drain_local_queue(true);
3273     _task->drain_global_stack(true);
3274 
3275     // if the has_aborted flag has been raised, we need to bail out of
3276     // the iteration
3277     return !_task->has_aborted();
3278   }
3279 };
3280 
3281 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3282                                ConcurrentMark* cm,
3283                                CMTask* task)
3284   : _g1h(g1h), _cm(cm), _task(task) {
3285   assert(_ref_processor == NULL, "should be initialized to NULL");
3286 
3287   if (G1UseConcMarkReferenceProcessing) {
3288     _ref_processor = g1h->ref_processor_cm();
3289     assert(_ref_processor != NULL, "should not be NULL");
3290   }
3291 }
3292 
3293 void CMTask::setup_for_region(HeapRegion* hr) {
3294   assert(hr != NULL,
3295         "claim_region() should have filtered out NULL regions");
3296   assert(!hr->is_continues_humongous(),
3297         "claim_region() should have filtered out continues humongous regions");
3298 
3299   if (_cm->verbose_low()) {
3300     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3301                            _worker_id, p2i(hr));
3302   }
3303 
3304   _curr_region  = hr;
3305   _finger       = hr->bottom();
3306   update_region_limit();
3307 }
3308 
3309 void CMTask::update_region_limit() {
3310   HeapRegion* hr            = _curr_region;
3311   HeapWord* bottom          = hr->bottom();
3312   HeapWord* limit           = hr->next_top_at_mark_start();
3313 
3314   if (limit == bottom) {
3315     if (_cm->verbose_low()) {
3316       gclog_or_tty->print_cr("[%u] found an empty region "
3317                              "["PTR_FORMAT", "PTR_FORMAT")",
3318                              _worker_id, p2i(bottom), p2i(limit));
3319     }
3320     // The region was collected underneath our feet.
3321     // We set the finger to bottom to ensure that the bitmap
3322     // iteration that will follow this will not do anything.
3323     // (this is not a condition that holds when we set the region up,
3324     // as the region is not supposed to be empty in the first place)
3325     _finger = bottom;
3326   } else if (limit >= _region_limit) {
3327     assert(limit >= _finger, "peace of mind");
3328   } else {
3329     assert(limit < _region_limit, "only way to get here");
3330     // This can happen under some pretty unusual circumstances.  An
3331     // evacuation pause empties the region underneath our feet (NTAMS
3332     // at bottom). We then do some allocation in the region (NTAMS
3333     // stays at bottom), followed by the region being used as a GC
3334     // alloc region (NTAMS will move to top() and the objects
3335     // originally below it will be grayed). All objects now marked in
3336     // the region are explicitly grayed, if below the global finger,
3337     // and we do not need in fact to scan anything else. So, we simply
3338     // set _finger to be limit to ensure that the bitmap iteration
3339     // doesn't do anything.
3340     _finger = limit;
3341   }
3342 
3343   _region_limit = limit;
3344 }
3345 
3346 void CMTask::giveup_current_region() {
3347   assert(_curr_region != NULL, "invariant");
3348   if (_cm->verbose_low()) {
3349     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3350                            _worker_id, p2i(_curr_region));
3351   }
3352   clear_region_fields();
3353 }
3354 
3355 void CMTask::clear_region_fields() {
3356   // Set these three fields to values that indicate that we're not
3357   // holding on to a region.
3358   _curr_region   = NULL;
3359   _finger        = NULL;
3360   _region_limit  = NULL;
3361 }
3362 
3363 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3364   if (cm_oop_closure == NULL) {
3365     assert(_cm_oop_closure != NULL, "invariant");
3366   } else {
3367     assert(_cm_oop_closure == NULL, "invariant");
3368   }
3369   _cm_oop_closure = cm_oop_closure;
3370 }
3371 
3372 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3373   guarantee(nextMarkBitMap != NULL, "invariant");
3374 
3375   if (_cm->verbose_low()) {
3376     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3377   }
3378 
3379   _nextMarkBitMap                = nextMarkBitMap;
3380   clear_region_fields();
3381 
3382   _calls                         = 0;
3383   _elapsed_time_ms               = 0.0;
3384   _termination_time_ms           = 0.0;
3385   _termination_start_time_ms     = 0.0;
3386 
3387 #if _MARKING_STATS_
3388   _aborted                       = 0;
3389   _aborted_overflow              = 0;
3390   _aborted_cm_aborted            = 0;
3391   _aborted_yield                 = 0;
3392   _aborted_timed_out             = 0;
3393   _aborted_satb                  = 0;
3394   _aborted_termination           = 0;
3395   _steal_attempts                = 0;
3396   _steals                        = 0;
3397   _local_pushes                  = 0;
3398   _local_pops                    = 0;
3399   _local_max_size                = 0;
3400   _objs_scanned                  = 0;
3401   _global_pushes                 = 0;
3402   _global_pops                   = 0;
3403   _global_max_size               = 0;
3404   _global_transfers_to           = 0;
3405   _global_transfers_from         = 0;
3406   _regions_claimed               = 0;
3407   _objs_found_on_bitmap          = 0;
3408   _satb_buffers_processed        = 0;
3409 #endif // _MARKING_STATS_
3410 }
3411 
3412 bool CMTask::should_exit_termination() {
3413   regular_clock_call();
3414   // This is called when we are in the termination protocol. We should
3415   // quit if, for some reason, this task wants to abort or the global
3416   // stack is not empty (this means that we can get work from it).
3417   return !_cm->mark_stack_empty() || has_aborted();
3418 }
3419 
3420 void CMTask::reached_limit() {
3421   assert(_words_scanned >= _words_scanned_limit ||
3422          _refs_reached >= _refs_reached_limit,
3423          "shouldn't have been called otherwise");
3424   regular_clock_call();
3425 }
3426 
3427 void CMTask::regular_clock_call() {
3428   if (has_aborted()) return;
3429 
3430   // First, we need to recalculate the words scanned and refs reached
3431   // limits for the next clock call.
3432   recalculate_limits();
3433 
3434   // During the regular clock call we do the following
3435 
3436   // (1) If an overflow has been flagged, then we abort.
3437   if (_cm->has_overflown()) {
3438     set_has_aborted();
3439     return;
3440   }
3441 
3442   // If we are not concurrent (i.e. we're doing remark) we don't need
3443   // to check anything else. The other steps are only needed during
3444   // the concurrent marking phase.
3445   if (!concurrent()) return;
3446 
3447   // (2) If marking has been aborted for Full GC, then we also abort.
3448   if (_cm->has_aborted()) {
3449     set_has_aborted();
3450     statsOnly( ++_aborted_cm_aborted );
3451     return;
3452   }
3453 
3454   double curr_time_ms = os::elapsedVTime() * 1000.0;
3455 
3456   // (3) If marking stats are enabled, then we update the step history.
3457 #if _MARKING_STATS_
3458   if (_words_scanned >= _words_scanned_limit) {
3459     ++_clock_due_to_scanning;
3460   }
3461   if (_refs_reached >= _refs_reached_limit) {
3462     ++_clock_due_to_marking;
3463   }
3464 
3465   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3466   _interval_start_time_ms = curr_time_ms;
3467   _all_clock_intervals_ms.add(last_interval_ms);
3468 
3469   if (_cm->verbose_medium()) {
3470       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3471                         "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3472                         _worker_id, last_interval_ms,
3473                         _words_scanned,
3474                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3475                         _refs_reached,
3476                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3477   }
3478 #endif // _MARKING_STATS_
3479 
3480   // (4) We check whether we should yield. If we have to, then we abort.
3481   if (SuspendibleThreadSet::should_yield()) {
3482     // We should yield. To do this we abort the task. The caller is
3483     // responsible for yielding.
3484     set_has_aborted();
3485     statsOnly( ++_aborted_yield );
3486     return;
3487   }
3488 
3489   // (5) We check whether we've reached our time quota. If we have,
3490   // then we abort.
3491   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3492   if (elapsed_time_ms > _time_target_ms) {
3493     set_has_aborted();
3494     _has_timed_out = true;
3495     statsOnly( ++_aborted_timed_out );
3496     return;
3497   }
3498 
3499   // (6) Finally, we check whether there are enough completed SATB
3500   // buffers available for processing. If there are, we abort.
3501   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3502   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3503     if (_cm->verbose_low()) {
3504       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3505                              _worker_id);
3506     }
3507     // we do need to process SATB buffers, we'll abort and restart
3508     // the marking task to do so
3509     set_has_aborted();
3510     statsOnly( ++_aborted_satb );
3511     return;
3512   }
3513 }
3514 
3515 void CMTask::recalculate_limits() {
3516   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3517   _words_scanned_limit      = _real_words_scanned_limit;
3518 
3519   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
3520   _refs_reached_limit       = _real_refs_reached_limit;
3521 }
3522 
3523 void CMTask::decrease_limits() {
3524   // This is called when we believe that we're going to do an infrequent
3525   // operation which will increase the per-byte scanned cost (i.e. move
3526   // entries to/from the global stack). It basically tries to decrease the
3527   // scanning limit so that the clock is called earlier.
3528 
3529   if (_cm->verbose_medium()) {
3530     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3531   }
3532 
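  // Since recalculate_limits() set each _real_*_limit to the progress at
  // that point plus one period, pulling the working limits back by 3/4 of
  // a period leaves at most about a quarter of the usual work budget
  // before reached_limit(), and hence regular_clock_call(), fires again.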
3533   _words_scanned_limit = _real_words_scanned_limit -
3534     3 * words_scanned_period / 4;
3535   _refs_reached_limit  = _real_refs_reached_limit -
3536     3 * refs_reached_period / 4;
3537 }
3538 
3539 void CMTask::move_entries_to_global_stack() {
3540   // local array where we'll store the entries that will be popped
3541   // from the local queue
3542   oop buffer[global_stack_transfer_size];
3543 
3544   int n = 0;
3545   oop obj;
3546   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3547     buffer[n] = obj;
3548     ++n;
3549   }
3550 
3551   if (n > 0) {
3552     // we popped at least one entry from the local queue
3553 
3554     statsOnly( ++_global_transfers_to; _local_pops += n );
3555 
3556     if (!_cm->mark_stack_push(buffer, n)) {
3557       if (_cm->verbose_low()) {
3558         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3559                                _worker_id);
3560       }
3561       set_has_aborted();
3562     } else {
3563       // the transfer was successful
3564 
3565       if (_cm->verbose_medium()) {
3566         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3567                                _worker_id, n);
3568       }
3569       statsOnly( size_t tmp_size = _cm->mark_stack_size();
3570                  if (tmp_size > _global_max_size) {
3571                    _global_max_size = tmp_size;
3572                  }
3573                  _global_pushes += n );
3574     }
3575   }
3576 
3577   // this operation was quite expensive, so decrease the limits
3578   decrease_limits();
3579 }
3580 
3581 void CMTask::get_entries_from_global_stack() {
3582   // local array where we'll store the entries that will be popped
3583   // from the global stack.
3584   oop buffer[global_stack_transfer_size];
3585   int n;
3586   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3587   assert(n <= global_stack_transfer_size,
3588          "we should not pop more than the given limit");
3589   if (n > 0) {
3590     // yes, we did actually pop at least one entry
3591 
3592     statsOnly( ++_global_transfers_from; _global_pops += n );
3593     if (_cm->verbose_medium()) {
3594       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3595                              _worker_id, n);
3596     }
3597     for (int i = 0; i < n; ++i) {
3598       bool success = _task_queue->push(buffer[i]);
3599       // We only call this when the local queue is empty or under a
3600       // given target limit. So, we do not expect this push to fail.
3601       assert(success, "invariant");
3602     }
3603 
3604     statsOnly( size_t tmp_size = (size_t)_task_queue->size();
3605                if (tmp_size > _local_max_size) {
3606                  _local_max_size = tmp_size;
3607                }
3608                _local_pushes += n );
3609   }
3610 
3611   // this operation was quite expensive, so decrease the limits
3612   decrease_limits();
3613 }
3614 
3615 void CMTask::drain_local_queue(bool partially) {
3616   if (has_aborted()) return;
3617 
3618   // Decide what the target size is, depending whether we're going to
3619   // drain it partially (so that other tasks can steal if they run out
3620   // of things to do) or totally (at the very end).
3621   size_t target_size;
3622   if (partially) {
3623     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3624   } else {
3625     target_size = 0;
3626   }
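  // As a sketch of the effect (assuming the default value of 64 for
  // GCDrainStackTargetSize): a partial drain stops once at most 64 entries
  // remain, so other tasks can still find entries to steal.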
3627 
3628   if (_task_queue->size() > target_size) {
3629     if (_cm->verbose_high()) {
3630       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3631                              _worker_id, target_size);
3632     }
3633 
3634     oop obj;
3635     bool ret = _task_queue->pop_local(obj);
3636     while (ret) {
3637       statsOnly( ++_local_pops );
3638 
3639       if (_cm->verbose_high()) {
3640         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3641                                p2i((void*) obj));
3642       }
3643 
3644       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
3645       assert(!_g1h->is_on_master_free_list(
3646                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3647 
3648       scan_object(obj);
3649 
3650       if (_task_queue->size() <= target_size || has_aborted()) {
3651         ret = false;
3652       } else {
3653         ret = _task_queue->pop_local(obj);
3654       }
3655     }
3656 
3657     if (_cm->verbose_high()) {
3658       gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3659                              _worker_id, _task_queue->size());
3660     }
3661   }
3662 }
3663 
3664 void CMTask::drain_global_stack(bool partially) {
3665   if (has_aborted()) return;
3666 
3667   // We have a policy to drain the local queue before we attempt to
3668   // drain the global stack.
3669   assert(partially || _task_queue->size() == 0, "invariant");
3670 
3671   // Decide what the target size is, depending whether we're going to
3672   // drain it partially (so that other tasks can steal if they run out
3673   // of things to do) or totally (at the very end).  Notice that,
3674   // because we move entries from the global stack in chunks or
3675   // because another task might be doing the same, we might in fact
3676   // drop below the target. But, this is not a problem.
3677   size_t target_size;
3678   if (partially) {
3679     target_size = _cm->partial_mark_stack_size_target();
3680   } else {
3681     target_size = 0;
3682   }
3683 
3684   if (_cm->mark_stack_size() > target_size) {
3685     if (_cm->verbose_low()) {
3686       gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3687                              _worker_id, target_size);
3688     }
3689 
3690     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3691       get_entries_from_global_stack();
3692       drain_local_queue(partially);
3693     }
3694 
3695     if (_cm->verbose_low()) {
3696       gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3697                              _worker_id, _cm->mark_stack_size());
3698     }
3699   }
3700 }
3701 
3702 // The SATB queue set makes several assumptions about whether to call the
3703 // par or non-par versions of its methods. This is why some of the code is
3704 // replicated. We should really get rid of the single-threaded version
3705 // of the code to simplify things.
3706 void CMTask::drain_satb_buffers() {
3707   if (has_aborted()) return;
3708 
3709   // We set this so that the regular clock knows that we're in the
3710   // middle of draining buffers and doesn't set the abort flag when it
3711   // notices that SATB buffers are available for draining. It'd be
3712   // very counter productive if it did that. :-)
3713   _draining_satb_buffers = true;
3714 
3715   CMSATBBufferClosure satb_cl(this, _g1h);
3716   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3717 
3718   // This keeps claiming and applying the closure to completed buffers
3719   // until we run out of buffers or we need to abort.
3720   while (!has_aborted() &&
3721          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
3722     if (_cm->verbose_medium()) {
3723       gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3724     }
3725     statsOnly( ++_satb_buffers_processed );
3726     regular_clock_call();
3727   }
3728 
3729   _draining_satb_buffers = false;
3730 
3731   assert(has_aborted() ||
3732          concurrent() ||
3733          satb_mq_set.completed_buffers_num() == 0, "invariant");
3734 
3735   // again, this was a potentially expensive operation, decrease the
3736   // limits to get the regular clock call early
3737   decrease_limits();
3738 }
3739 
3740 void CMTask::print_stats() {
3741   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3742                          _worker_id, _calls);
3743   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3744                          _elapsed_time_ms, _termination_time_ms);
3745   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3746                          _step_times_ms.num(), _step_times_ms.avg(),
3747                          _step_times_ms.sd());
3748   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3749                          _step_times_ms.maximum(), _step_times_ms.sum());
3750 
3751 #if _MARKING_STATS_
3752   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3753                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3754                          _all_clock_intervals_ms.sd());
3755   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
3756                          _all_clock_intervals_ms.maximum(),
3757                          _all_clock_intervals_ms.sum());
3758   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT,
3759                          _clock_due_to_scanning, _clock_due_to_marking);
3760   gclog_or_tty->print_cr("  Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT,
3761                          _objs_scanned, _objs_found_on_bitmap);
3762   gclog_or_tty->print_cr("  Local Queue:  pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
3763                          _local_pushes, _local_pops, _local_max_size);
3764   gclog_or_tty->print_cr("  Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
3765                          _global_pushes, _global_pops, _global_max_size);
3766   gclog_or_tty->print_cr("                transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT,
3767                          _global_transfers_to, _global_transfers_from);
3768   gclog_or_tty->print_cr("  Regions: claimed = " SIZE_FORMAT, _regions_claimed);
3769   gclog_or_tty->print_cr("  SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed);
3770   gclog_or_tty->print_cr("  Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT,
3771                          _steal_attempts, _steals);
3772   gclog_or_tty->print_cr("  Aborted: " SIZE_FORMAT ", due to", _aborted);
3773   gclog_or_tty->print_cr("    overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT,
3774                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3775   gclog_or_tty->print_cr("    time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT,
3776                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3777 #endif // _MARKING_STATS_
3778 }
3779 
3780 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
3781   return _task_queues->steal(worker_id, hash_seed, obj);
3782 }
3783 
3784 /*****************************************************************************
3785 
3786     The do_marking_step(time_target_ms, ...) method is the building
3787     block of the parallel marking framework. It can be called in parallel
3788     with other invocations of do_marking_step() on different tasks
3789     (but only one per task, obviously) and concurrently with the
3790     mutator threads, or during remark, hence it eliminates the need
3791     for two versions of the code. When called during remark, it will
3792     pick up from where the task left off during the concurrent marking
3793     phase. Interestingly, tasks are also claimable during evacuation
3794     pauses, since do_marking_step() ensures that it aborts before
3795     it needs to yield.
3796 
3797     The data structures that it uses to do marking work are the
3798     following:
3799 
3800       (1) Marking Bitmap. If there are gray objects that appear only
3801       on the bitmap (this happens either when dealing with an overflow
3802       or when the initial marking phase has simply marked the roots
3803       and didn't push them on the stack), then tasks claim heap
3804       regions whose bitmap they then scan to find gray objects. A
3805       global finger indicates where the end of the last claimed region
3806       is. A local finger indicates how far into the region a task has
3807       scanned. The two fingers are used to determine how to gray an
3808       object (i.e. whether simply marking it is OK, as it will be
3809       visited by a task in the future, or whether it needs to be also
3810       pushed on a stack).
3811 
3812       (2) Local Queue. The local queue of the task which is accessed
3813       reasonably efficiently by the task. Other tasks can steal from
3814       it when they run out of work. Throughout the marking phase, a
3815       task attempts to keep its local queue short but not totally
3816       empty, so that entries are available for stealing by other
3817       tasks. Only when there is no more work, a task will totally
3818       drain its local queue.
3819 
3820       (3) Global Mark Stack. This handles local queue overflow. During
3821       marking only sets of entries are moved between it and the local
3822       queues, as access to it requires a mutex and more fine-grained
3823       interaction with it might cause contention. If it
3824       overflows, then the marking phase should restart and iterate
3825       over the bitmap to identify gray objects. Throughout the marking
3826       phase, tasks attempt to keep the global mark stack at a small
3827       length but not totally empty, so that entries are available for
3828       popping by other tasks. Only when there is no more work, tasks
3829       will totally drain the global mark stack.
3830 
3831       (4) SATB Buffer Queue. This is where completed SATB buffers are
3832       made available. Buffers are regularly removed from this queue
3833       and scanned for roots, so that the queue doesn't get too
3834       long. During remark, all completed buffers are processed, as
3835       well as the filled in parts of any uncompleted buffers.
3836 
3837     The do_marking_step() method tries to abort when the time target
3838     has been reached. There are a few other cases when the
3839     do_marking_step() method also aborts:
3840 
3841       (1) When the marking phase has been aborted (after a Full GC).
3842 
3843       (2) When a global overflow (on the global stack) has been
3844       triggered. Before the task aborts, it will actually sync up with
3845       the other tasks to ensure that all the marking data structures
3846       (local queues, stacks, fingers etc.)  are re-initialized so that
3847       when do_marking_step() completes, the marking phase can
3848       immediately restart.
3849 
3850       (3) When enough completed SATB buffers are available. The
3851       do_marking_step() method only tries to drain SATB buffers right
3852       at the beginning. So, if enough buffers are available, the
3853       marking step aborts and the SATB buffers are processed at
3854       the beginning of the next invocation.
3855 
3856       (4) To yield. When we have to yield, we abort and yield
3857       right at the end of do_marking_step(). This saves us from a lot
3858       of hassle as, by yielding, we might allow a Full GC. If this
3859       happens then objects will be compacted underneath our feet, the
3860       heap might shrink, etc. We save checking for this by just
3861       aborting and doing the yield right at the end.
3862 
3863     From the above it follows that the do_marking_step() method should
3864     be called in a loop (or, otherwise, regularly) until it completes.
3865 
3866     If a marking step completes without its has_aborted() flag being
3867     true, it means it has completed the current marking phase (and
3868     also all other marking tasks have done so and have all synced up).
3869 
3870     A method called regular_clock_call() is invoked "regularly" (in
3871     sub-ms intervals) throughout marking. It is this clock method that
3872     checks all the abort conditions which were mentioned above and
3873     decides when the task should abort. A work-based scheme is used to
3874     trigger this clock method: when the number of object words the
3875     marking phase has scanned or the number of references the marking
3876     phase has visited reach a given limit. Additional invocations of
3877     the clock method have been planted in a few other strategic places
3878     too. The initial reason for the clock method was to avoid calling
3879     vtime too regularly, as it is quite expensive. So, once it was in
3880     place, it was natural to piggy-back all the other conditions on it
3881     too and not constantly check them throughout the code.
3882 
3883     If do_termination is true then do_marking_step will enter its
3884     termination protocol.
3885 
3886     The value of is_serial must be true when do_marking_step is being
3887     called serially (i.e. by the VMThread) and do_marking_step should
3888     skip any synchronization in the termination and overflow code.
3889     Examples include the serial remark code and the serial reference
3890     processing closures.
3891 
3892     The value of is_serial must be false when do_marking_step is
3893     being called by any of the worker threads in a work gang.
3894     Examples include the concurrent marking code (CMMarkingTask),
3895     the MT remark code, and the MT reference processing closures.
3896 
3897  *****************************************************************************/
3898 
3899 void CMTask::do_marking_step(double time_target_ms,
3900                              bool do_termination,
3901                              bool is_serial) {
3902   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
3903   assert(concurrent() == _cm->concurrent(), "they should be the same");
3904 
3905   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
3906   assert(_task_queues != NULL, "invariant");
3907   assert(_task_queue != NULL, "invariant");
3908   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
3909 
3910   assert(!_claimed,
3911          "only one thread should claim this task at any one time");
3912 
3913   // OK, this doesn't safeguard against all possible scenarios, as it is
3914   // possible for two threads to set the _claimed flag at the same
3915   // time. But it is only for debugging purposes anyway and it will
3916   // catch most problems.
3917   _claimed = true;
3918 
3919   _start_time_ms = os::elapsedVTime() * 1000.0;
3920   statsOnly( _interval_start_time_ms = _start_time_ms );
3921 
3922   // If do_stealing is true then do_marking_step will attempt to
3923   // steal work from the other CMTasks. It only makes sense to
3924   // enable stealing when the termination protocol is enabled
3925   // and do_marking_step() is not being called serially.
3926   bool do_stealing = do_termination && !is_serial;
3927 
3928   double diff_prediction_ms =
3929     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
3930   _time_target_ms = time_target_ms - diff_prediction_ms;
3931 
3932   // set up the variables that are used in the work-based scheme to
3933   // call the regular clock method
3934   _words_scanned = 0;
3935   _refs_reached  = 0;
3936   recalculate_limits();
3937 
3938   // clear all flags
3939   clear_has_aborted();
3940   _has_timed_out = false;
3941   _draining_satb_buffers = false;
3942 
3943   ++_calls;
3944 
3945   if (_cm->verbose_low()) {
3946     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
3947                            "target = %1.2lfms >>>>>>>>>>",
3948                            _worker_id, _calls, _time_target_ms);
3949   }
3950 
3951   // Set up the bitmap and oop closures. Anything that uses them is
3952   // eventually called from this method, so it is OK to allocate these
3953   // statically.
3954   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
3955   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
3956   set_cm_oop_closure(&cm_oop_closure);
3957 
3958   if (_cm->has_overflown()) {
3959     // This can happen if the mark stack overflows during a GC pause
3960     // and this task, after a yield point, restarts. We have to abort
3961     // as we need to get into the overflow protocol which happens
3962     // right at the end of this task.
3963     set_has_aborted();
3964   }
3965 
3966   // First drain any available SATB buffers. After this, we will not
3967   // look at SATB buffers before the next invocation of this method.
3968   // If enough completed SATB buffers are queued up, the regular clock
3969   // will abort this task so that it restarts.
3970   drain_satb_buffers();
3971   // ...then partially drain the local queue and the global stack
3972   drain_local_queue(true);
3973   drain_global_stack(true);
3974 
3975   do {
3976     if (!has_aborted() && _curr_region != NULL) {
3977       // This means that we're already holding on to a region.
3978       assert(_finger != NULL, "if region is not NULL, then the finger "
3979              "should not be NULL either");
3980 
3981       // We might have restarted this task after an evacuation pause
3982       // which might have evacuated the region we're holding on to
3983       // underneath our feet. Let's read its limit again to make sure
3984       // that we do not iterate over a region of the heap that
3985       // contains garbage (update_region_limit() will also move
3986       // _finger to the start of the region if it is found empty).
3987       update_region_limit();
3988       // We will start from _finger not from the start of the region,
3989       // as we might be restarting this task after aborting half-way
3990       // through scanning this region. In this case, _finger points to
3991       // the address where we last found a marked object. If this is a
3992       // fresh region, _finger points to start().
3993       MemRegion mr = MemRegion(_finger, _region_limit);
3994 
3995       if (_cm->verbose_low()) {
3996         gclog_or_tty->print_cr("[%u] we're scanning part "
3997                                "["PTR_FORMAT", "PTR_FORMAT") "
3998                                "of region "HR_FORMAT,
3999                                _worker_id, p2i(_finger), p2i(_region_limit),
4000                                HR_FORMAT_PARAMS(_curr_region));
4001       }
4002 
4003       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
4004              "humongous regions should go around loop once only");
4005 
4006       // Some special cases:
4007       // If the memory region is empty, we can just give up the region.
4008       // If the current region is humongous then we only need to check
4009       // the bitmap for the bit associated with the start of the object,
4010       // scan the object if it's live, and give up the region.
4011       // Otherwise, let's iterate over the bitmap of the part of the region
4012       // that is left.
4013       // If the iteration is successful, give up the region.
4014       if (mr.is_empty()) {
4015         giveup_current_region();
4016         regular_clock_call();
4017       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
4018         if (_nextMarkBitMap->isMarked(mr.start())) {
4019           // The object is marked - apply the closure
4020           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4021           bitmap_closure.do_bit(offset);
4022         }
4023         // Even if this task aborted while scanning the humongous object
4024         // we can (and should) give up the current region.
4025         giveup_current_region();
4026         regular_clock_call();
4027       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4028         giveup_current_region();
4029         regular_clock_call();
4030       } else {
4031         assert(has_aborted(), "currently the only way to do so");
4032         // The only way to abort the bitmap iteration is to return
4033         // false from the do_bit() method. However, inside the
4034         // do_bit() method we move the _finger to point to the
4035         // object currently being looked at. So, if we bail out, we
4036         // have definitely set _finger to something non-null.
4037         assert(_finger != NULL, "invariant");
4038 
4039         // Region iteration was actually aborted. So now _finger
4040         // points to the address of the object we last scanned. If we
4041         // leave it there, when we restart this task, we will rescan
4042         // the object. It is easy to avoid this. We move the finger by
4043         // enough to point to the next possible object header (the
4044         // bitmap knows by how much we need to move it as it knows its
4045         // granularity).
4046         assert(_finger < _region_limit, "invariant");
4047         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
4048         // Check if bitmap iteration was aborted while scanning the last object
4049         if (new_finger >= _region_limit) {
4050           giveup_current_region();
4051         } else {
4052           move_finger_to(new_finger);
4053         }
4054       }
4055     }
4056     // At this point we have either completed iterating over the
4057     // region we were holding on to, or we have aborted.
4058 
4059     // We then partially drain the local queue and the global stack.
4060     // (Do we really need this?)
4061     drain_local_queue(true);
4062     drain_global_stack(true);
4063 
4064     // Read the note on the claim_region() method on why it might
4065     // return NULL with potentially more regions available for
4066     // claiming and why we have to check out_of_regions() to determine
4067     // whether we're done or not.
4068     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4069       // We are going to try to claim a new region. We should have
4070       // given up on the previous one.
4071       // Separated the asserts so that we know which one fires.
4072       assert(_curr_region  == NULL, "invariant");
4073       assert(_finger       == NULL, "invariant");
4074       assert(_region_limit == NULL, "invariant");
4075       if (_cm->verbose_low()) {
4076         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4077       }
4078       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4079       if (claimed_region != NULL) {
4080         // Yes, we managed to claim one
4081         statsOnly( ++_regions_claimed );
4082 
4083         if (_cm->verbose_low()) {
4084           gclog_or_tty->print_cr("[%u] we successfully claimed "
4085                                  "region "PTR_FORMAT,
4086                                  _worker_id, p2i(claimed_region));
4087         }
4088 
4089         setup_for_region(claimed_region);
4090         assert(_curr_region == claimed_region, "invariant");
4091       }
4092       // It is important to call the regular clock here. It might take
4093       // a while to claim a region if, for example, we hit a large
4094       // block of empty regions. So we need to call the regular clock
4095       // method once round the loop to make sure it's called
4096       // frequently enough.
4097       regular_clock_call();
4098     }
4099 
4100     if (!has_aborted() && _curr_region == NULL) {
4101       assert(_cm->out_of_regions(),
4102              "at this point we should be out of regions");
4103     }
4104   } while ( _curr_region != NULL && !has_aborted());
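       // We fall out of the main region loop either because this task has
       // aborted or because _curr_region is NULL, i.e. we failed to claim
       // a new region and (as asserted below) we must be out of regions.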
4105 
4106   if (!has_aborted()) {
4107     // We cannot check whether the global stack is empty, since other
4108     // tasks might be pushing objects to it concurrently.
4109     assert(_cm->out_of_regions(),
4110            "at this point we should be out of regions");
4111 
4112     if (_cm->verbose_low()) {
4113       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4114     }
4115 
4116     // Try to reduce the number of available SATB buffers so that
4117     // remark has less work to do.
4118     drain_satb_buffers();
4119   }
4120 
4121   // Since we've done everything else, we can now totally drain the
4122   // local queue and global stack.
4123   drain_local_queue(false);
4124   drain_global_stack(false);
4125 
4126   // Attempt to steal work from other tasks' queues.
4127   if (do_stealing && !has_aborted()) {
4128     // We have not aborted. This means that we have finished all that
4129     // we could. Let's try to do some stealing...
4130 
4131     // We cannot check whether the global stack is empty, since other
4132     // tasks might be pushing objects to it concurrently.
4133     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4134            "only way to reach here");
4135 
4136     if (_cm->verbose_low()) {
4137       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4138     }
4139 
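         // try_stealing() picks victim queues pseudo-randomly (seeded by
         // _hash_seed) and returns false when it fails to find anything
         // to steal, which breaks us out of the loop below.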
4140     while (!has_aborted()) {
4141       oop obj;
4142       statsOnly( ++_steal_attempts );
4143 
4144       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4145         if (_cm->verbose_medium()) {
4146           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4147                                  _worker_id, p2i((void*) obj));
4148         }
4149 
4150         statsOnly( ++_steals );
4151 
4152         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4153                "any stolen object should be marked");
4154         scan_object(obj);
4155 
4156         // And since we're towards the end, let's totally drain the
4157         // local queue and global stack.
4158         drain_local_queue(false);
4159         drain_global_stack(false);
4160       } else {
4161         break;
4162       }
4163     }
4164   }
4165 
4166   // If we are about to wrap up and go into termination, check if we
4167   // should raise the overflow flag.
4168   if (do_termination && !has_aborted()) {
4169     if (_cm->force_overflow()->should_force()) {
4170       _cm->set_has_overflown();
4171       regular_clock_call();
4172     }
4173   }
4174 
4175   // We still haven't aborted. Now, let's try to get into the
4176   // termination protocol.
4177   if (do_termination && !has_aborted()) {
4178     // We cannot check whether the global stack is empty, since other
4179     // tasks might be concurrently pushing objects on it.
4180     // Separated the asserts so that we know which one fires.
4181     assert(_cm->out_of_regions(), "only way to reach here");
4182     assert(_task_queue->size() == 0, "only way to reach here");
4183 
4184     if (_cm->verbose_low()) {
4185       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4186     }
4187 
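         // os::elapsedVTime() reports the current thread's virtual (CPU)
         // time in seconds, hence the scaling by 1000.0 to milliseconds.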
4188     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4189 
4190     // CMTask also extends the TerminatorTerminator class, so the
4191     // terminator will call our should_exit_termination() method to
4192     // decide whether this task should leave the termination protocol.
4193     bool finished = (is_serial ||
4194                      _cm->terminator()->offer_termination(this));
4195     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4196     _termination_time_ms +=
4197       termination_end_time_ms - _termination_start_time_ms;
4198 
4199     if (finished) {
4200       // We're all done.
4201 
4202       if (_worker_id == 0) {
4203         // let's allow task 0 to do this
4204         if (concurrent()) {
4205           assert(_cm->concurrent_marking_in_progress(), "invariant");
4206           // we need to set this to false before the next
4207           // safepoint. This way we ensure that the marking phase
4208           // doesn't observe any more heap expansions.
4209           _cm->clear_concurrent_marking_in_progress();
4210         }
4211       }
4212 
4213       // We can now guarantee that the global stack is empty, since
4214       // all other tasks have finished. We separated the guarantees so
4215       // that, if a condition is false, we can immediately find out
4216       // which one.
4217       guarantee(_cm->out_of_regions(), "only way to reach here");
4218       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4219       guarantee(_task_queue->size() == 0, "only way to reach here");
4220       guarantee(!_cm->has_overflown(), "only way to reach here");
4221       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4222 
4223       if (_cm->verbose_low()) {
4224         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4225       }
4226     } else {
4227       // Apparently there's more work to do. Let's abort this task; it
4228       // will be restarted and we can hopefully find more things to do.
4229 
4230       if (_cm->verbose_low()) {
4231         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4232                                _worker_id);
4233       }
4234 
4235       set_has_aborted();
4236       statsOnly( ++_aborted_termination );
4237     }
4238   }
4239 
4240   // Mainly for debugging purposes to make sure that a pointer to the
4241   // closure which was stack-allocated in this frame doesn't
4242   // escape it by accident.
4243   set_cm_oop_closure(NULL);
4244   double end_time_ms = os::elapsedVTime() * 1000.0;
4245   double elapsed_time_ms = end_time_ms - _start_time_ms;
4246   // Update the step history.
4247   _step_times_ms.add(elapsed_time_ms);
4248 
4249   if (has_aborted()) {
4250     // The task was aborted for some reason.
4251 
4252     statsOnly( ++_aborted );
4253 
4254     if (_has_timed_out) {
4255       double diff_ms = elapsed_time_ms - _time_target_ms;
4256       // Keep statistics of how well we did with respect to hitting
4257       // our target only if we actually timed out (if we aborted for
4258       // other reasons, then the results might get skewed).
4259       _marking_step_diffs_ms.add(diff_ms);
4260     }
4261 
4262     if (_cm->has_overflown()) {
4263       // This is the interesting one. We aborted because a global
4264       // overflow was raised. This means we have to restart the
4265       // marking phase and start iterating over regions. However, in
4266       // order to do this we have to make sure that all tasks stop
4267       // what they are doing and re-initialize in a safe manner. We
4268       // will achieve this with the use of two barrier sync points.
4269 
4270       if (_cm->verbose_low()) {
4271         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4272       }
4273 
4274       if (!is_serial) {
4275         // We only need to enter the sync barrier if being called
4276         // from a parallel context
4277         _cm->enter_first_sync_barrier(_worker_id);
4278 
4279         // When we exit this sync barrier we know that all tasks have
4280         // stopped doing marking work. So, it's now safe to
4281         // re-initialize our data structures. At the end of this method,
4282         // task 0 will clear the global data structures.
4283       }
4284 
4285       statsOnly( ++_aborted_overflow );
4286 
4287       // We clear the local state of this task...
4288       clear_region_fields();
4289 
4290       if (!is_serial) {
4291         // ...and enter the second barrier.
4292         _cm->enter_second_sync_barrier(_worker_id);
4293       }
4294       // At this point, if we're in the concurrent phase of
4295       // marking, everything has been re-initialized and we're
4296       // ready to restart.
4297     }
4298 
4299     if (_cm->verbose_low()) {
4300       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4301                              "elapsed = %1.2lfms <<<<<<<<<<",
4302                              _worker_id, _time_target_ms, elapsed_time_ms);
4303       if (_cm->has_aborted()) {
4304         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4305                                _worker_id);
4306       }
4307     }
4308   } else {
4309     if (_cm->verbose_low()) {
4310       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4311                              "elapsed = %1.2lfms <<<<<<<<<<",
4312                              _worker_id, _time_target_ms, elapsed_time_ms);
4313     }
4314   }
4315 
4316   _claimed = false;
4317 }
4318 
4319 CMTask::CMTask(uint worker_id,
4320                ConcurrentMark* cm,
4321                size_t* marked_bytes,
4322                BitMap* card_bm,
4323                CMTaskQueue* task_queue,
4324                CMTaskQueueSet* task_queues)
4325   : _g1h(G1CollectedHeap::heap()),
4326     _worker_id(worker_id), _cm(cm),
4327     _claimed(false),
4328     _nextMarkBitMap(NULL), _hash_seed(17),
4329     _task_queue(task_queue),
4330     _task_queues(task_queues),
4331     _cm_oop_closure(NULL),
4332     _marked_bytes_array(marked_bytes),
4333     _card_bm(card_bm) {
4334   guarantee(task_queue != NULL, "invariant");
4335   guarantee(task_queues != NULL, "invariant");
4336 
4337   statsOnly( _clock_due_to_scanning = 0;
4338              _clock_due_to_marking  = 0 );
4339 
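       // Seed the step-time statistics with a small non-zero value so that
       // the very first prediction has something to work with (0.5ms is
       // presumably just an arbitrary initial estimate).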
4340   _marking_step_diffs_ms.add(0.5);
4341 }
4342 
4343 // These are formatting macros that are used below to ensure
4344 // consistent formatting. The *_H_* versions are used to format the
4345 // header for a particular value and they should be kept consistent
4346 // with the corresponding macro. Also note that most of the macros add
4347 // the necessary white space (as a prefix) which makes them a bit
4348 // easier to compose.
4349 
4350 // All the output lines are prefixed with this string to be able to
4351 // identify them easily in a large log file.
4352 #define G1PPRL_LINE_PREFIX            "###"
4353 
4354 #define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
4355 #ifdef _LP64
4356 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
4357 #else // _LP64
4358 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
4359 #endif // _LP64
4360 
4361 // For per-region info
4362 #define G1PPRL_TYPE_FORMAT            "   %-4s"
4363 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
4364 #define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
4365 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
4366 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
4367 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
4368 
4369 // For summary info
4370 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
4371 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
4372 #define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
4373 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
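     // As a purely illustrative example (made-up values), a line composed
     // from the summary macros above renders roughly as:
     //   ###  used: 128.00 MB / 50.00 %  remset: 4.25 MB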
4374 
4375 G1PrintRegionLivenessInfoClosure::
4376 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4377   : _out(out),
4378     _total_used_bytes(0), _total_capacity_bytes(0),
4379     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4380     _hum_used_bytes(0), _hum_capacity_bytes(0),
4381     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4382     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4383   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4384   MemRegion g1_reserved = g1h->g1_reserved();
4385   double now = os::elapsedTime();
4386 
4387   // Print the header of the output.
4388   _out->cr();
4389   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4390   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4391                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4392                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4393                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4394                  HeapRegion::GrainBytes);
4395   _out->print_cr(G1PPRL_LINE_PREFIX);
4396   _out->print_cr(G1PPRL_LINE_PREFIX
4397                  G1PPRL_TYPE_H_FORMAT
4398                  G1PPRL_ADDR_BASE_H_FORMAT
4399                  G1PPRL_BYTE_H_FORMAT
4400                  G1PPRL_BYTE_H_FORMAT
4401                  G1PPRL_BYTE_H_FORMAT
4402                  G1PPRL_DOUBLE_H_FORMAT
4403                  G1PPRL_BYTE_H_FORMAT
4404                  G1PPRL_BYTE_H_FORMAT,
4405                  "type", "address-range",
4406                  "used", "prev-live", "next-live", "gc-eff",
4407                  "remset", "code-roots");
4408   _out->print_cr(G1PPRL_LINE_PREFIX
4409                  G1PPRL_TYPE_H_FORMAT
4410                  G1PPRL_ADDR_BASE_H_FORMAT
4411                  G1PPRL_BYTE_H_FORMAT
4412                  G1PPRL_BYTE_H_FORMAT
4413                  G1PPRL_BYTE_H_FORMAT
4414                  G1PPRL_DOUBLE_H_FORMAT
4415                  G1PPRL_BYTE_H_FORMAT
4416                  G1PPRL_BYTE_H_FORMAT,
4417                  "", "",
4418                  "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4419                  "(bytes)", "(bytes)");
4420 }
4421 
4422 // Takes a pointer to one of the _hum_* fields, deduces the
4423 // corresponding value for a region in a humongous region series
4424 // (either the region size, or what's left if the _hum_* field is
4425 // smaller than the region size), and updates the _hum_* field accordingly.
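     // For example (illustrative numbers): with HeapRegion::GrainBytes at
     // 1M and *hum_bytes starting at 2.5M, three successive calls return
     // 1M, 1M and 0.5M, leaving *hum_bytes at zero.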
4426 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4427   size_t bytes = 0;
4428   // The > 0 check is to deal with the prev and next live bytes which
4429   // could be 0.
4430   if (*hum_bytes > 0) {
4431     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4432     *hum_bytes -= bytes;
4433   }
4434   return bytes;
4435 }
4436 
4437 // It deduces the values for a region in a humongous region series
4438 // from the _hum_* fields and updates those accordingly. It assumes
4439 // that the _hum_* fields have already been set up from the "starts
4440 // humongous" region and we visit the regions in address order.
4441 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4442                                                      size_t* capacity_bytes,
4443                                                      size_t* prev_live_bytes,
4444                                                      size_t* next_live_bytes) {
4445   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4446   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
4447   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
4448   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4449   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4450 }
4451 
4452 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4453   const char* type       = r->get_type_str();
4454   HeapWord* bottom       = r->bottom();
4455   HeapWord* end          = r->end();
4456   size_t capacity_bytes  = r->capacity();
4457   size_t used_bytes      = r->used();
4458   size_t prev_live_bytes = r->live_bytes();
4459   size_t next_live_bytes = r->next_live_bytes();
4460   double gc_eff          = r->gc_efficiency();
4461   size_t remset_bytes    = r->rem_set()->mem_size();
4462   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4463 
4464   if (r->is_starts_humongous()) {
4465     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4466            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4467            "they should have been zeroed after the last time we used them");
4468     // Set up the _hum_* fields.
4469     _hum_capacity_bytes  = capacity_bytes;
4470     _hum_used_bytes      = used_bytes;
4471     _hum_prev_live_bytes = prev_live_bytes;
4472     _hum_next_live_bytes = next_live_bytes;
4473     get_hum_bytes(&used_bytes, &capacity_bytes,
4474                   &prev_live_bytes, &next_live_bytes);
4475     end = bottom + HeapRegion::GrainWords;
4476   } else if (r->is_continues_humongous()) {
4477     get_hum_bytes(&used_bytes, &capacity_bytes,
4478                   &prev_live_bytes, &next_live_bytes);
4479     assert(end == bottom + HeapRegion::GrainWords, "invariant");
4480   }
4481 
4482   _total_used_bytes      += used_bytes;
4483   _total_capacity_bytes  += capacity_bytes;
4484   _total_prev_live_bytes += prev_live_bytes;
4485   _total_next_live_bytes += next_live_bytes;
4486   _total_remset_bytes    += remset_bytes;
4487   _total_strong_code_roots_bytes += strong_code_roots_bytes;
4488 
4489   // Print a line for this particular region.
4490   _out->print_cr(G1PPRL_LINE_PREFIX
4491                  G1PPRL_TYPE_FORMAT
4492                  G1PPRL_ADDR_BASE_FORMAT
4493                  G1PPRL_BYTE_FORMAT
4494                  G1PPRL_BYTE_FORMAT
4495                  G1PPRL_BYTE_FORMAT
4496                  G1PPRL_DOUBLE_FORMAT
4497                  G1PPRL_BYTE_FORMAT
4498                  G1PPRL_BYTE_FORMAT,
4499                  type, p2i(bottom), p2i(end),
4500                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4501                  remset_bytes, strong_code_roots_bytes);
4502 
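       // Returning false keeps the heap region iteration going; returning
       // true would terminate it early.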
4503   return false;
4504 }
4505 
4506 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4507   // Add static memory usage to the remembered set totals.
4508   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4509   // Print the footer of the output.
4510   _out->print_cr(G1PPRL_LINE_PREFIX);
4511   _out->print_cr(G1PPRL_LINE_PREFIX
4512                  " SUMMARY"
4513                  G1PPRL_SUM_MB_FORMAT("capacity")
4514                  G1PPRL_SUM_MB_PERC_FORMAT("used")
4515                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4516                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4517                  G1PPRL_SUM_MB_FORMAT("remset")
4518                  G1PPRL_SUM_MB_FORMAT("code-roots"),
4519                  bytes_to_mb(_total_capacity_bytes),
4520                  bytes_to_mb(_total_used_bytes),
4521                  perc(_total_used_bytes, _total_capacity_bytes),
4522                  bytes_to_mb(_total_prev_live_bytes),
4523                  perc(_total_prev_live_bytes, _total_capacity_bytes),
4524                  bytes_to_mb(_total_next_live_bytes),
4525                  perc(_total_next_live_bytes, _total_capacity_bytes),
4526                  bytes_to_mb(_total_remset_bytes),
4527                  bytes_to_mb(_total_strong_code_roots_bytes));
4528   _out->cr();
4529 }