/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}
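
// Illustrative note on the addressing above (assumptions, not from the
// original source): with _shifter == LogMinObjAlignment (typically 0 on
// 64-bit builds, where the minimum object alignment is one heap word),
// each bitmap bit covers (1 << _shifter) heap words. heapWordToOffset()
// is then (addr - _bmStartWord) >> _shifter and offsetToHeapWord() inverts
// it, so rounding addr up to HeapWordSize << _shifter bytes guarantees the
// search starts exactly on a bit boundary.
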
  81 
  82 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
  83                                                  const HeapWord* limit) const {
  84   size_t addrOffset = heapWordToOffset(addr);
  85   if (limit == NULL) {
  86     limit = _bmStartWord + _bmWordSize;
  87   }
  88   size_t limitOffset = heapWordToOffset(limit);
  89   size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  90   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  91   assert(nextAddr >= addr, "get_next_one postcondition");
  92   assert(nextAddr == limit || !isMarked(nextAddr),
  93          "get_next_one postcondition");
  94   return nextAddr;
  95 }
  96 
  97 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  98   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  99   return (int) (diff >> _shifter);
 100 }
 101 
 102 #ifndef PRODUCT
 103 bool CMBitMapRO::covers(MemRegion heap_rs) const {
 104   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
 105   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
 106          "size inconsistency");
 107   return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
 108          _bmWordSize  == heap_rs.word_size();
 109 }
 110 #endif
 111 
 112 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
 113   _bm.print_on_error(st, prefix);
 114 }
 115 
 116 size_t CMBitMap::compute_size(size_t heap_size) {
 117   return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
 118 }
 119 
 120 size_t CMBitMap::mark_distance() {
 121   return MinObjAlignmentInBytes * BitsPerByte;
 122 }
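
// Sizing sketch (assuming the common 8-byte minimum object alignment):
// mark_distance() = MinObjAlignmentInBytes * BitsPerByte = 8 * 8 = 64,
// i.e. one bitmap byte describes 64 heap bytes, so compute_size() reserves
// roughly heap_size / 64 bytes (rounded up to the allocation alignment)
// for each mark bitmap.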

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

void CMBitMap::clearAll() {
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  G1CollectedHeap::heap()->heap_region_iterate(&cl);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  // Clip the range to the portion of the heap covered by this bitmap;
  // intersection() is const and returns the clipped region, so the
  // result must be assigned back to mr.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  // Clip the range as in markRange() above.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}
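
// Hypothetical usage sketch (names here are placeholders, not from the
// original source): a caller can consume every marked range in [cur, end)
// by looping until the returned region is empty:
//
//   MemRegion mr = bm->getAndClearMarkedRegion(cur, end);
//   while (!mr.is_empty()) {
//     process(mr);                                // placeholder
//     cur = mr.end();
//     mr = bm->getAndClearMarkedRegion(cur, end);
//   }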

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflowed the marking stack while marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically.  We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}
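
// Note on the CAS loop above (explanatory, not from the original source):
// Atomic::cmpxchg(exchange_value, dest, compare_value) returns the value
// that was in *dest, so the slot claim succeeds only when the returned
// value equals the index we read; otherwise another thread won the race
// and we retry with a fresh _index. Writing _base[index] after a
// successful CAS is safe because that slot now belongs to us alone.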

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int  ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint  new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}
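
// A design note, inferred from the lock used above rather than stated in
// the original source: par_push_arr() and par_pop_arr() serialize on
// ParGCRareEvent_lock instead of using CAS. Bulk transfers between the
// per-task queues and this global stack are expected to be rare, so a
// mutex keeps the multi-slot update atomic as a unit without a more
// elaborate lock-free protocol.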

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false),  _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}
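
// Example values for the scaling above: integer arithmetic makes this
// roughly one marking thread per four parallel GC threads, rounded to
// nearest and floored at one, e.g. n_par_threads of 1..5 -> 1, 6..9 -> 2,
// 10..13 -> 3, and so on.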

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
      vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads =       0;
    _max_parallel_marking_threads =   0;
    _sleep_factor             =     0.0;
    _marking_task_overhead    =     1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
                                                (double) os::processor_count();
      double sleep_factor =
                         (1.0 - marking_task_overhead) / marking_task_overhead;

      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor             = sleep_factor;
      _marking_task_overhead    = marking_task_overhead;
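
      // Worked example with assumed flag values (illustrative only):
      // G1MarkingOverheadPercent=10, MaxGCPauseMillis=200,
      // GCPauseIntervalMillis=1000 on an 8-CPU machine gives
      //   overall_cm_overhead   = 200 * 0.10 / 1000  = 0.02
      //   cpu_ratio             = 1 / 8              = 0.125
      //   marking_thread_num    = ceil(0.02 / 0.125) = 1
      //   marking_task_overhead = 0.02 / 1 * 8       = 0.16
      //   sleep_factor          = (1 - 0.16) / 0.16  = 5.25
      // i.e. one marking thread that sleeps 5.25x as long as it runs.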
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
                     (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);
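
  // Illustrative note (assumption, not from the original source): with this
  // bias, a heap address maps into the per-worker counting card bitmaps as
  //   idx = intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift)
  //         - _heap_bottom_card_num;
  // so index 0 corresponds to the first card of the reserved heap rather
  // than to address zero.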

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  g1h->heap_region_iterate(&cl);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialise marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining forced-overflow count will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that a Full GC or an evacuation pause occurs while it
 * is suspended. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
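
// Debug-only usage sketch (inferred from the logic above): running with
// -XX:G1ConcMarkForceOverflow=N arms should_force() once per update() call
// for the first N updates, so the first N marking rounds deliberately
// overflow and exercise the restart-for-overflow path; should_force()
// self-clears, so each armed overflow fires exactly once.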
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        _cm->clear_has_overflown();

        _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}
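
// Note on the loop above: oop_iterate() applies the closure to each
// reference field of the object and returns the object's size in heap
// words (as the sanity assert checks), so the cursor walks the region
// object by object from bottom() up to top().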

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops.  We overflowed.  Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
1403   void set_bit_for_region(HeapRegion* hr) {
1404     assert(!hr->continuesHumongous(), "should have filtered those out");
1405 
1406     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1407     if (!hr->startsHumongous()) {
1408       // Normal (non-humongous) case: just set the bit.
1409       _region_bm->par_at_put(index, true);
1410     } else {
1411       // Starts humongous case: calculate how many regions are part of
1412       // this humongous region and then set the bit range.
1413       BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
1414       _region_bm->par_at_put_range(index, end_index, true);
1415     }
1416   }
1417 
1418 public:
1419   CMCountDataClosureBase(G1CollectedHeap* g1h,
1420                          BitMap* region_bm, BitMap* card_bm):
1421     _g1h(g1h), _cm(g1h->concurrent_mark()),
1422     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
1423     _region_bm(region_bm), _card_bm(card_bm) { }
1424 };
1425 
1426 // Closure that calculates the # live objects per region. Used
1427 // for verification purposes during the cleanup pause.
1428 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1429   CMBitMapRO* _bm;
1430   size_t _region_marked_bytes;
1431 
1432 public:
1433   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1434                          BitMap* region_bm, BitMap* card_bm) :
1435     CMCountDataClosureBase(g1h, region_bm, card_bm),
1436     _bm(bm), _region_marked_bytes(0) { }
1437 
1438   bool doHeapRegion(HeapRegion* hr) {
1439 
1440     if (hr->continuesHumongous()) {
1441       // We will ignore these here and process them when their
1442       // associated "starts humongous" region is processed (see
1443       // set_bit_for_region()). Note that we cannot rely on their
1444       // associated "starts humongous" region to have its bit set to
1445       // 1 yet since, due to the region chunking in the parallel region
1446       // iteration, a "continues humongous" region might be visited
1447       // before its associated "starts humongous" region.
1448       return false;
1449     }
1450 
1451     HeapWord* ntams = hr->next_top_at_mark_start();
1452     HeapWord* start = hr->bottom();
1453 
1454     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1455            err_msg("Preconditions not met - "
1456                    "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1457                    p2i(start), p2i(ntams), p2i(hr->end())));
1458 
1459     // Find the first marked object at or after "start".
1460     start = _bm->getNextMarkedWordAddress(start, ntams);
1461 
1462     size_t marked_bytes = 0;
1463 
1464     while (start < ntams) {
1465       oop obj = oop(start);
1466       int obj_sz = obj->size();
1467       HeapWord* obj_end = start + obj_sz;
1468 
1469       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1470       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1471 
1472       // Note: if we're looking at the last region in the heap, obj_end
1473       // could actually be just beyond the end of the heap; end_idx
1474       // will then correspond to a (non-existent) card that is also
1475       // just beyond the heap.
1476       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1477         // end of object is not card aligned - increment to cover
1478         // all the cards spanned by the object
1479         end_idx += 1;
1480       }
1481 
1482       // Set the bits in the card BM for the cards spanned by this object.
1483       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1484 
1485       // Add the size of this object to the number of marked bytes.
1486       marked_bytes += (size_t)obj_sz * HeapWordSize;
1487 
1488       // Find the next marked object after this one.
1489       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1490     }
1491 
1492     // Mark the allocated-since-marking portion...
1493     HeapWord* top = hr->top();
1494     if (ntams < top) {
1495       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1496       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1497 
1498       // Note: if we're looking at the last region in the heap, top
1499       // could actually be just beyond the end of the heap; end_idx
1500       // will then correspond to a (non-existent) card that is also
1501       // just beyond the heap.
1502       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1503         // top is not card aligned - increment to cover all the
1504         // cards spanned by the allocated-since-marking portion
1505         end_idx += 1;
1506       }
1507       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1508 
1509       // This definitely means the region has live objects.
1510       set_bit_for_region(hr);
1511     }
1512 
1513     // Update the live region bitmap.
1514     if (marked_bytes > 0) {
1515       set_bit_for_region(hr);
1516     }
1517 
1518     // Set the marked bytes for the current region so that
1519     // it can be queried by a calling verification routine
1520     _region_marked_bytes = marked_bytes;
1521 
1522     return false;
1523   }
1524 
1525   size_t region_marked_bytes() const { return _region_marked_bytes; }
1526 };
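
// The card-index arithmetic used by doHeapRegion() above, as a worked
// example. It assumes 512-byte cards (a card shift of 9) and assumes that
// card_bitmap_index_for(addr) is essentially (addr - heap_base) >> shift;
// both the mapping and the constants below are stand-ins for illustration.
#if 0  // Illustrative sketch only -- not compiled.
#include <cstddef>
#include <cstdint>

static const int       kCardShift = 9;               // 512-byte cards
static const uintptr_t kHeapBase  = 0x100000000ULL;  // made-up heap base

static size_t card_index_for(uintptr_t addr) {
  return (addr - kHeapBase) >> kCardShift;
}

static void card_range_example() {
  // An object spanning [kHeapBase + 0x1000, kHeapBase + 0x1210):
  uintptr_t start   = kHeapBase + 0x1000;   // card-aligned start
  uintptr_t obj_end = kHeapBase + 0x1210;   // NOT card-aligned

  size_t start_idx = card_index_for(start);    // 0x1000 >> 9 == 8
  size_t end_idx   = card_index_for(obj_end);  // 0x1210 >> 9 == 9

  // obj_end is not card aligned, so bump end_idx to cover the card the
  // object's tail lands on -- mirroring the "end_idx += 1" above.
  if ((obj_end & ((uintptr_t(1) << kCardShift) - 1)) != 0) {
    end_idx += 1;  // now 10: the range [8, 10) covers cards 8 and 9
  }
  // set_card_bitmap_range(card_bm, start_idx, end_idx, ...) would then
  // set both cards the object touches.
}
#endif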
1527 
1528 // Heap region closure used for verifying the counting data
1529 // that was accumulated concurrently and aggregated during
1530 // the remark pause. This closure is applied to the heap
1531 // regions during the STW cleanup pause.
1532 
1533 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1534   G1CollectedHeap* _g1h;
1535   ConcurrentMark* _cm;
1536   CalcLiveObjectsClosure _calc_cl;
1537   BitMap* _region_bm;   // Region BM to be verified
1538   BitMap* _card_bm;     // Card BM to be verified
1539   bool _verbose;        // verbose output?
1540 
1541   BitMap* _exp_region_bm; // Expected Region BM values
1542   BitMap* _exp_card_bm;   // Expected card BM values
1543 
1544   int _failures;
1545 
1546 public:
1547   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1548                                 BitMap* region_bm,
1549                                 BitMap* card_bm,
1550                                 BitMap* exp_region_bm,
1551                                 BitMap* exp_card_bm,
1552                                 bool verbose) :
1553     _g1h(g1h), _cm(g1h->concurrent_mark()),
1554     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1555     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1556     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1557     _failures(0) { }
1558 
1559   int failures() const { return _failures; }
1560 
1561   bool doHeapRegion(HeapRegion* hr) {
1562     if (hr->continuesHumongous()) {
1563       // We will ignore these here and process them when their
1564       // associated "starts humongous" region is processed (see
1565       // set_bit_for_region()). Note that we cannot rely on their
1566       // associated "starts humongous" region to have its bit set to
1567       // 1 yet since, due to the region chunking in the parallel region
1568       // iteration, a "continues humongous" region might be visited
1569       // before its associated "starts humongous" region.
1570       return false;
1571     }
1572 
1573     int failures = 0;
1574 
1575     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1576     // this region and set the corresponding bits in the expected region
1577     // and card bitmaps.
1578     bool res = _calc_cl.doHeapRegion(hr);
1579     assert(res == false, "should be continuing");
1580 
1581     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1582                     Mutex::_no_safepoint_check_flag);
1583 
1584     // Verify the marked bytes for this region.
1585     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1586     size_t act_marked_bytes = hr->next_marked_bytes();
1587 
1588     // We're not OK if expected marked bytes > actual marked bytes. It means
1589     // we have missed accounting some objects during the actual marking.
1590     if (exp_marked_bytes > act_marked_bytes) {
1591       if (_verbose) {
1592         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1593                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1594                                hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
1595       }
1596       failures += 1;
1597     }
1598 
1599     // Verify this region's bit in the actual region bitmap against the
1600     // bit in the expected region bitmap (which was just calculated).
1601     // We're not OK if the bit in the calculated expected region
1602     // bitmap is set and the bit in the actual region bitmap is not.
1603     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1604 
1605     bool expected = _exp_region_bm->at(index);
1606     bool actual = _region_bm->at(index);
1607     if (expected && !actual) {
1608       if (_verbose) {
1609         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1610                                "expected: %s, actual: %s",
1611                                hr->hrm_index(),
1612                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1613       }
1614       failures += 1;
1615     }
1616 
1617     // Verify that the card bit maps for the cards spanned by the current
1618     // region match. We have an error if we have a set bit in the expected
1619     // bit map and the corresponding bit in the actual bitmap is not set.
1620 
1621     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1622     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1623 
1624     for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
1625       expected = _exp_card_bm->at(i);
1626       actual = _card_bm->at(i);
1627 
1628       if (expected && !actual) {
1629         if (_verbose) {
1630           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1631                                  "expected: %s, actual: %s",
1632                                  hr->hrm_index(), i,
1633                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1634         }
1635         failures += 1;
1636       }
1637     }
1638 
1639     if (failures > 0 && _verbose) {
1640       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1641                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1642                              HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
1643                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1644     }
1645 
1646     _failures += failures;
1647 
1648     // We could stop iteration over the heap when we
1649     // find the first violating region by returning true.
1650     return false;
1651   }
1652 };
1653 
1654 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1655 protected:
1656   G1CollectedHeap* _g1h;
1657   ConcurrentMark* _cm;
1658   BitMap* _actual_region_bm;
1659   BitMap* _actual_card_bm;
1660 
1661   uint    _n_workers;
1662 
1663   BitMap* _expected_region_bm;
1664   BitMap* _expected_card_bm;
1665 
1666   int  _failures;
1667   bool _verbose;
1668 
1669 public:
1670   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1671                             BitMap* region_bm, BitMap* card_bm,
1672                             BitMap* expected_region_bm, BitMap* expected_card_bm)
1673     : AbstractGangTask("G1 verify final counting"),
1674       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1675       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1676       _n_workers(0),
1677       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1678       _failures(0), _verbose(false) {
1679     assert(VerifyDuringGC, "don't call this otherwise");
1680 
1681     // Use the value already set as the number of active threads
1682     // in the call to run_task().
1683     if (G1CollectedHeap::use_parallel_gc_threads()) {
1684       assert(_g1h->workers()->active_workers() > 0,
1685              "Should have been previously set");
1686       _n_workers = _g1h->workers()->active_workers();
1687     } else {
1688       _n_workers = 1;
1689     }
1690 
1691     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1692     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1693 
1694     _verbose = _cm->verbose_medium();
1695   }
1696 
1697   void work(uint worker_id) {
1698     assert(worker_id < _n_workers, "invariant");
1699 
1700     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1701                                             _actual_region_bm, _actual_card_bm,
1702                                             _expected_region_bm,
1703                                             _expected_card_bm,
1704                                             _verbose);
1705 
1706     if (G1CollectedHeap::use_parallel_gc_threads()) {
1707       _g1h->heap_region_par_iterate_chunked(&verify_cl,
1708                                             worker_id,
1709                                             _n_workers,
1710                                             HeapRegion::VerifyCountClaimValue);
1711     } else {
1712       _g1h->heap_region_iterate(&verify_cl);
1713     }
1714 
1715     Atomic::add(verify_cl.failures(), &_failures);
1716   }
1717 
1718   int failures() const { return _failures; }
1719 };
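
// The per-bit rule that VerifyLiveObjectDataHRClosure applies to both the
// region and card bitmaps: every bit set in the expected (recomputed) map
// must also be set in the actual (accumulated) map, while an actual bit
// with no expected counterpart is tolerated by the check. A minimal
// sketch of that asymmetric comparison over plain arrays:
#if 0  // Illustrative sketch only -- not compiled.
#include <cstddef>

static int count_bitmap_failures(const bool* expected,
                                 const bool* actual,
                                 size_t num_bits) {
  int failures = 0;
  for (size_t i = 0; i < num_bits; i++) {
    if (expected[i] && !actual[i]) {
      failures++;  // liveness we recomputed but failed to accumulate
    }
  }
  return failures;
}
#endif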
1720 
1721 // Closure that finalizes the liveness counting data.
1722 // Used during the cleanup pause.
1723 // Sets the bits corresponding to the interval [NTAMS, top]
1724 // (which contains the implicitly live objects) in the
1725 // card liveness bitmap. Also sets the bit for each region,
1726 // containing live data, in the region liveness bitmap.
1727 
1728 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1729  public:
1730   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1731                               BitMap* region_bm,
1732                               BitMap* card_bm) :
1733     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1734 
1735   bool doHeapRegion(HeapRegion* hr) {
1736 
1737     if (hr->continuesHumongous()) {
1738       // We will ignore these here and process them when their
1739       // associated "starts humongous" region is processed (see
1740       // set_bit_for_region()). Note that we cannot rely on their
1741       // associated "starts humongous" region to have its bit set to
1742       // 1 yet since, due to the region chunking in the parallel region
1743       // iteration, a "continues humongous" region might be visited
1744       // before its associated "starts humongous" region.
1745       return false;
1746     }
1747 
1748     HeapWord* ntams = hr->next_top_at_mark_start();
1749     HeapWord* top   = hr->top();
1750 
1751     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1752 
1753     // Mark the allocated-since-marking portion...
1754     if (ntams < top) {
1755       // This definitely means the region has live objects.
1756       set_bit_for_region(hr);
1757 
1758       // Now set the bits in the card bitmap for [ntams, top)
1759       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1760       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1761 
1762       // Note: if we're looking at the last region in the heap, top
1763       // could actually be just beyond the end of the heap; end_idx
1764       // will then correspond to a (non-existent) card that is also
1765       // just beyond the heap.
1766       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1767         // top is not card aligned - increment to cover all the
1768         // cards spanned by the allocated-since-marking portion
1769         end_idx += 1;
1770       }
1771 
1772       assert(end_idx <= _card_bm->size(),
1773              err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1774                      end_idx, _card_bm->size()));
1775       assert(start_idx < _card_bm->size(),
1776              err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1777                      start_idx, _card_bm->size()));
1778 
1779       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1780     }
1781 
1782     // Set the bit for the region if it contains live data
1783     if (hr->next_marked_bytes() > 0) {
1784       set_bit_for_region(hr);
1785     }
1786 
1787     return false;
1788   }
1789 };
1790 
1791 class G1ParFinalCountTask: public AbstractGangTask {
1792 protected:
1793   G1CollectedHeap* _g1h;
1794   ConcurrentMark* _cm;
1795   BitMap* _actual_region_bm;
1796   BitMap* _actual_card_bm;
1797 
1798   uint    _n_workers;
1799 
1800 public:
1801   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1802     : AbstractGangTask("G1 final counting"),
1803       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1804       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1805       _n_workers(0) {
1806     // Use the value already set as the number of active threads
1807     // in the call to run_task().
1808     if (G1CollectedHeap::use_parallel_gc_threads()) {
1809       assert(_g1h->workers()->active_workers() > 0,
1810              "Should have been previously set");
1811       _n_workers = _g1h->workers()->active_workers();
1812     } else {
1813       _n_workers = 1;
1814     }
1815   }
1816 
1817   void work(uint worker_id) {
1818     assert(worker_id < _n_workers, "invariant");
1819 
1820     FinalCountDataUpdateClosure final_update_cl(_g1h,
1821                                                 _actual_region_bm,
1822                                                 _actual_card_bm);
1823 
1824     if (G1CollectedHeap::use_parallel_gc_threads()) {
1825       _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1826                                             worker_id,
1827                                             _n_workers,
1828                                             HeapRegion::FinalCountClaimValue);
1829     } else {
1830       _g1h->heap_region_iterate(&final_update_cl);
1831     }
1832   }
1833 };
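
// The claiming that makes heap_region_par_iterate_chunked() safe, in
// miniature: each worker tries to move a per-region claim word from the
// previous claim value to this task's value with a CAS, and only the
// winner applies the closure to the region. This sketches the general
// technique -- the real claim mechanics live in HeapRegion, not here --
// with std::atomic standing in for the VM's atomics.
#if 0  // Illustrative sketch only -- not compiled.
#include <atomic>

struct RegionClaimSketch {
  std::atomic<int> claim_value;
};

static bool try_claim_region(RegionClaimSketch* r, int old_claim, int new_claim) {
  int expected = old_claim;
  // Succeeds for exactly one worker per region; the
  // check_heap_region_claim_values() asserts in cleanup() below then
  // verify that every region ended up carrying the new claim value.
  return r->claim_value.compare_exchange_strong(expected, new_claim);
}
#endif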
1834 
1835 class G1ParNoteEndTask;
1836 
1837 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1838   G1CollectedHeap* _g1;
1839   size_t _max_live_bytes;
1840   uint _regions_claimed;
1841   size_t _freed_bytes;
1842   FreeRegionList* _local_cleanup_list;
1843   HeapRegionSetCount _old_regions_removed;
1844   HeapRegionSetCount _humongous_regions_removed;
1845   HRRSCleanupTask* _hrrs_cleanup_task;
1846   double _claimed_region_time;
1847   double _max_region_time;
1848 
1849 public:
1850   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1851                              FreeRegionList* local_cleanup_list,
1852                              HRRSCleanupTask* hrrs_cleanup_task) :
1853     _g1(g1),
1854     _max_live_bytes(0), _regions_claimed(0),
1855     _freed_bytes(0),
1856     _local_cleanup_list(local_cleanup_list),
1857     _old_regions_removed(),
1858     _humongous_regions_removed(),
1859     _hrrs_cleanup_task(hrrs_cleanup_task),
1860     _claimed_region_time(0.0), _max_region_time(0.0) { }
1861 
1862   size_t freed_bytes() { return _freed_bytes; }
1863   const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1864   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1865 
1866   bool doHeapRegion(HeapRegion *hr) {
1867     if (hr->continuesHumongous()) {
1868       return false;
1869     }
1870     // We use a claim value of zero here because all regions
1871     // were claimed with value 1 in the FinalCount task.
1872     _g1->reset_gc_time_stamps(hr);
1873     double start = os::elapsedTime();
1874     _regions_claimed++;
1875     hr->note_end_of_marking();
1876     _max_live_bytes += hr->max_live_bytes();
1877 
1878     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1879       _freed_bytes += hr->used();
1880       hr->set_containing_set(NULL);
1881       if (hr->isHumongous()) {
1882         assert(hr->startsHumongous(), "we should only see starts humongous");
1883         _humongous_regions_removed.increment(1u, hr->capacity());
1884         _g1->free_humongous_region(hr, _local_cleanup_list, true);
1885       } else {
1886         _old_regions_removed.increment(1u, hr->capacity());
1887         _g1->free_region(hr, _local_cleanup_list, true);
1888       }
1889     } else {
1890       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1891     }
1892 
1893     double region_time = (os::elapsedTime() - start);
1894     _claimed_region_time += region_time;
1895     if (region_time > _max_region_time) {
1896       _max_region_time = region_time;
1897     }
1898     return false;
1899   }
1900 
1901   size_t max_live_bytes() { return _max_live_bytes; }
1902   uint regions_claimed() { return _regions_claimed; }
1903   double claimed_region_time_sec() { return _claimed_region_time; }
1904   double max_region_time_sec() { return _max_region_time; }
1905 };
1906 
1907 class G1ParNoteEndTask: public AbstractGangTask {
1908   friend class G1NoteEndOfConcMarkClosure;
1909 
1910 protected:
1911   G1CollectedHeap* _g1h;
1912   size_t _max_live_bytes;
1913   size_t _freed_bytes;
1914   FreeRegionList* _cleanup_list;
1915 
1916 public:
1917   G1ParNoteEndTask(G1CollectedHeap* g1h,
1918                    FreeRegionList* cleanup_list) :
1919     AbstractGangTask("G1 note end"), _g1h(g1h),
1920     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1921 
1922   void work(uint worker_id) {
1923     double start = os::elapsedTime();
1924     FreeRegionList local_cleanup_list("Local Cleanup List");
1925     HRRSCleanupTask hrrs_cleanup_task;
1926     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1927                                            &hrrs_cleanup_task);
1928     if (G1CollectedHeap::use_parallel_gc_threads()) {
1929       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1930                                             _g1h->workers()->active_workers(),
1931                                             HeapRegion::NoteEndClaimValue);
1932     } else {
1933       _g1h->heap_region_iterate(&g1_note_end);
1934     }
1935     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1936 
1937     // Now update the lists
1938     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1939     {
1940       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1941       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1942       _max_live_bytes += g1_note_end.max_live_bytes();
1943       _freed_bytes += g1_note_end.freed_bytes();
1944 
1945       // If we iterate over the global cleanup list at the end of
1946       // cleanup to do this printing we will not guarantee to only
1947       // generate output for the newly-reclaimed regions (the list
1948       // might not be empty at the beginning of cleanup; we might
1949       // still be working on its previous contents). So we do the
1950       // printing here, before we append the new regions to the global
1951       // cleanup list.
1952 
1953       G1HRPrinter* hr_printer = _g1h->hr_printer();
1954       if (hr_printer->is_active()) {
1955         FreeRegionListIterator iter(&local_cleanup_list);
1956         while (iter.more_available()) {
1957           HeapRegion* hr = iter.get_next();
1958           hr_printer->cleanup(hr);
1959         }
1960       }
1961 
1962       _cleanup_list->add_ordered(&local_cleanup_list);
1963       assert(local_cleanup_list.is_empty(), "post-condition");
1964 
1965       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1966     }
1967   }
1968   size_t max_live_bytes() { return _max_live_bytes; }
1969   size_t freed_bytes() { return _freed_bytes; }
1970 };
1971 
1972 class G1ParScrubRemSetTask: public AbstractGangTask {
1973 protected:
1974   G1RemSet* _g1rs;
1975   BitMap* _region_bm;
1976   BitMap* _card_bm;
1977 public:
1978   G1ParScrubRemSetTask(G1CollectedHeap* g1h,
1979                        BitMap* region_bm, BitMap* card_bm) :
1980     AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
1981     _region_bm(region_bm), _card_bm(card_bm) { }
1982 
1983   void work(uint worker_id) {
1984     if (G1CollectedHeap::use_parallel_gc_threads()) {
1985       _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
1986                        HeapRegion::ScrubRemSetClaimValue);
1987     } else {
1988       _g1rs->scrub(_region_bm, _card_bm);
1989     }
1990   }
1991 
1992 };
1993 
1994 void ConcurrentMark::cleanup() {
1995   // world is stopped at this checkpoint
1996   assert(SafepointSynchronize::is_at_safepoint(),
1997          "world should be stopped");
1998   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1999 
2000   // If a full collection has happened, we shouldn't do this.
2001   if (has_aborted()) {
2002     g1h->set_marking_complete(); // So bitmap clearing isn't confused
2003     return;
2004   }
2005 
2006   g1h->verify_region_sets_optional();
2007 
2008   if (VerifyDuringGC) {
2009     HandleMark hm;  // handle scope
2010     Universe::heap()->prepare_for_verify();
2011     Universe::verify(VerifyOption_G1UsePrevMarking,
2012                      " VerifyDuringGC:(before)");
2013   }
2014   g1h->check_bitmaps("Cleanup Start");
2015 
2016   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
2017   g1p->record_concurrent_mark_cleanup_start();
2018 
2019   double start = os::elapsedTime();
2020 
2021   HeapRegionRemSet::reset_for_cleanup_tasks();
2022 
2023   uint n_workers;
2024 
2025   // Do counting once more with the world stopped for good measure.
2026   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
2027 
2028   if (G1CollectedHeap::use_parallel_gc_threads()) {
2029     assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2030            "sanity check");
2031 
2032     g1h->set_par_threads();
2033     n_workers = g1h->n_par_threads();
2034     assert(g1h->n_par_threads() == n_workers,
2035            "Should not have been reset");
2036     g1h->workers()->run_task(&g1_par_count_task);
2037     // Done with the parallel phase so reset to 0.
2038     g1h->set_par_threads(0);
2039 
2040     assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
2041            "sanity check");
2042   } else {
2043     n_workers = 1;
2044     g1_par_count_task.work(0);
2045   }
2046 
2047   if (VerifyDuringGC) {
2048     // Verify that the counting data accumulated during marking matches
2049     // that calculated by walking the marking bitmap.
2050 
2051     // Bitmaps to hold expected values
2052     BitMap expected_region_bm(_region_bm.size(), true);
2053     BitMap expected_card_bm(_card_bm.size(), true);
2054 
2055     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2056                                                  &_region_bm,
2057                                                  &_card_bm,
2058                                                  &expected_region_bm,
2059                                                  &expected_card_bm);
2060 
2061     if (G1CollectedHeap::use_parallel_gc_threads()) {
2062       g1h->set_par_threads((int)n_workers);
2063       g1h->workers()->run_task(&g1_par_verify_task);
2064       // Done with the parallel phase so reset to 0.
2065       g1h->set_par_threads(0);
2066 
2067       assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2068              "sanity check");
2069     } else {
2070       g1_par_verify_task.work(0);
2071     }
2072 
2073     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2074   }
2075 
2076   size_t start_used_bytes = g1h->used();
2077   g1h->set_marking_complete();
2078 
2079   double count_end = os::elapsedTime();
2080   double this_final_counting_time = (count_end - start);
2081   _total_counting_time += this_final_counting_time;
2082 
2083   if (G1PrintRegionLivenessInfo) {
2084     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2085     _g1h->heap_region_iterate(&cl);
2086   }
2087 
2088   // Install the newly-completed "next" mark bitmap as "prev".
2089   swapMarkBitMaps();
2090 
2091   g1h->reset_gc_time_stamp();
2092 
2093   // Note end of marking in all heap regions.
2094   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2095   if (G1CollectedHeap::use_parallel_gc_threads()) {
2096     g1h->set_par_threads((int)n_workers);
2097     g1h->workers()->run_task(&g1_par_note_end_task);
2098     g1h->set_par_threads(0);
2099 
2100     assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2101            "sanity check");
2102   } else {
2103     g1_par_note_end_task.work(0);
2104   }
2105   g1h->check_gc_time_stamps();
2106 
2107   if (!cleanup_list_is_empty()) {
2108     // The cleanup list is not empty, so we'll have to process it
2109     // concurrently. Notify anyone else that might be wanting free
2110     // regions that there will be more free regions coming soon.
2111     g1h->set_free_regions_coming();
2112   }
2113 
2114   // Scrub the rem sets before the record_concurrent_mark_cleanup_end() call
2115   // below, since scrubbing affects the metric by which we sort the heap regions.
2116   if (G1ScrubRemSets) {
2117     double rs_scrub_start = os::elapsedTime();
2118     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2119     if (G1CollectedHeap::use_parallel_gc_threads()) {
2120       g1h->set_par_threads((int)n_workers);
2121       g1h->workers()->run_task(&g1_par_scrub_rs_task);
2122       g1h->set_par_threads(0);
2123 
2124       assert(g1h->check_heap_region_claim_values(
2125                                             HeapRegion::ScrubRemSetClaimValue),
2126              "sanity check");
2127     } else {
2128       g1_par_scrub_rs_task.work(0);
2129     }
2130 
2131     double rs_scrub_end = os::elapsedTime();
2132     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2133     _total_rs_scrub_time += this_rs_scrub_time;
2134   }
2135 
2136   // This will also free any regions totally full of garbage objects,
2137   // and sort the regions.
2138   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2139 
2140   // Statistics.
2141   double end = os::elapsedTime();
2142   _cleanup_times.add((end - start) * 1000.0);
2143 
2144   if (G1Log::fine()) {
2145     g1h->print_size_transition(gclog_or_tty,
2146                                start_used_bytes,
2147                                g1h->used(),
2148                                g1h->capacity());
2149   }
2150 
2151   // Clean up will have freed any regions completely full of garbage.
2152   // Update the soft reference policy with the new heap occupancy.
2153   Universe::update_heap_info_at_gc();
2154 
2155   if (VerifyDuringGC) {
2156     HandleMark hm;  // handle scope
2157     Universe::heap()->prepare_for_verify();
2158     Universe::verify(VerifyOption_G1UsePrevMarking,
2159                      " VerifyDuringGC:(after)");
2160   }
2161   g1h->check_bitmaps("Cleanup End");
2162 
2163   g1h->verify_region_sets_optional();
2164 
2165   // We need to make this be a "collection" so any collection pause that
2166   // races with it goes around and waits for completeCleanup to finish.
2167   g1h->increment_total_collections();
2168 
2169   // Clean out dead classes and update Metaspace sizes.
2170   if (ClassUnloadingWithConcurrentMark) {
2171     ClassLoaderDataGraph::purge();
2172   }
2173   MetaspaceGC::compute_new_size();
2174 
2175   // We reclaimed old regions so we should calculate the sizes to make
2176   // sure we update the old gen/space data.
2177   g1h->g1mm()->update_sizes();
2178   g1h->allocation_context_stats().update_after_mark();
2179 
2180   g1h->trace_heap_after_concurrent_cycle();
2181 }
2182 
2183 void ConcurrentMark::completeCleanup() {
2184   if (has_aborted()) return;
2185 
2186   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2187 
2188   _cleanup_list.verify_optional();
2189   FreeRegionList tmp_free_list("Tmp Free List");
2190 
2191   if (G1ConcRegionFreeingVerbose) {
2192     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2193                            "cleanup list has %u entries",
2194                            _cleanup_list.length());
2195   }
2196 
2197   // No one else should be accessing the _cleanup_list at this point,
2198   // so it is not necessary to take any locks
2199   while (!_cleanup_list.is_empty()) {
2200     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2201     assert(hr != NULL, "Got NULL from a non-empty list");
2202     hr->par_clear();
2203     tmp_free_list.add_ordered(hr);
2204 
2205     // Instead of adding one region at a time to the secondary_free_list,
2206     // we accumulate them in the local list and move them a few at a
2207     // time. This also cuts down on the number of notify_all() calls
2208     // we do during this process. We'll also append the local list when
2209     // _cleanup_list is empty (which means we just removed the last
2210     // region from the _cleanup_list).
2211     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2212         _cleanup_list.is_empty()) {
2213       if (G1ConcRegionFreeingVerbose) {
2214         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2215                                "appending %u entries to the secondary_free_list, "
2216                                "cleanup list still has %u entries",
2217                                tmp_free_list.length(),
2218                                _cleanup_list.length());
2219       }
2220 
2221       {
2222         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2223         g1h->secondary_free_list_add(&tmp_free_list);
2224         SecondaryFreeList_lock->notify_all();
2225       }
2226 
2227       if (G1StressConcRegionFreeing) {
2228         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2229           os::sleep(Thread::current(), (jlong) 1, false);
2230         }
2231       }
2232     }
2233   }
2234   assert(tmp_free_list.is_empty(), "post-condition");
2235 }
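
// The batching pattern used by completeCleanup() in a generic form: drain
// a private list, publishing to a shared list (and waking any consumers)
// only every batch_size elements and once more at the end. std::mutex and
// std::condition_variable stand in for SecondaryFreeList_lock and its
// notify_all(); batch_size stands in for G1SecondaryFreeListAppendLength.
#if 0  // Illustrative sketch only -- not compiled.
#include <condition_variable>
#include <list>
#include <mutex>

template <typename T>
void drain_in_batches(std::list<T>& source,
                      std::list<T>& shared,
                      std::mutex& shared_lock,
                      std::condition_variable& shared_cv,
                      size_t batch_size) {
  std::list<T> local;
  while (!source.empty()) {
    // Move one element from the source to the private list.
    local.splice(local.end(), source, source.begin());
    // Publish when a batch fills up, or when the source is drained.
    if (local.size() % batch_size == 0 || source.empty()) {
      std::lock_guard<std::mutex> guard(shared_lock);
      shared.splice(shared.end(), local);  // leaves 'local' empty
      shared_cv.notify_all();
    }
  }
}
#endif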
2236 
2237 // Supporting Object and Oop closures for reference discovery
2238 // and processing during marking
2239 
2240 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2241   HeapWord* addr = (HeapWord*)obj;
2242   return addr != NULL &&
2243          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2244 }
2245 
2246 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2247 // Uses the CMTask associated with a worker thread (for serial reference
2248 // processing the CMTask for worker 0 is used) to preserve (mark) and
2249 // trace referent objects.
2250 //
2251 // Using the CMTask and embedded local queues avoids having the worker
2252 // threads operating on the global mark stack. This reduces the risk
2253 // of overflowing the stack - which we would rather avoid at this late
2254 // stage. Also, using the tasks' local queues removes the potential
2255 // for the workers to interfere with each other, which could occur when
2256 // operating on the global stack.
2257 
2258 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2259   ConcurrentMark* _cm;
2260   CMTask*         _task;
2261   int             _ref_counter_limit;
2262   int             _ref_counter;
2263   bool            _is_serial;
2264  public:
2265   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2266     _cm(cm), _task(task),
2267     _ref_counter_limit(G1RefProcDrainInterval), _is_serial(is_serial) {
2268     assert(_ref_counter_limit > 0, "sanity");
2269     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2270     _ref_counter = _ref_counter_limit;
2271   }
2272 
2273   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2274   virtual void do_oop(      oop* p) { do_oop_work(p); }
2275 
2276   template <class T> void do_oop_work(T* p) {
2277     if (!_cm->has_overflown()) {
2278       oop obj = oopDesc::load_decode_heap_oop(p);
2279       if (_cm->verbose_high()) {
2280         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2281                                "*"PTR_FORMAT" = "PTR_FORMAT,
2282                                _task->worker_id(), p2i(p), p2i((void*) obj));
2283       }
2284 
2285       _task->deal_with_reference(obj);
2286       _ref_counter--;
2287 
2288       if (_ref_counter == 0) {
2289         // We have dealt with _ref_counter_limit references, pushing them
2290         // and objects reachable from them on to the local stack (and
2291         // possibly the global stack). Call CMTask::do_marking_step() to
2292         // process these entries.
2293         //
2294         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2295         // there's nothing more to do (i.e. we're done with the entries that
2296         // were pushed as a result of the CMTask::deal_with_reference() calls
2297         // above) or we overflow.
2298         //
2299         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2300         // flag while there may still be some work to do. (See the comment at
2301         // the beginning of CMTask::do_marking_step() for those conditions -
2302         // one of which is reaching the specified time target.) It is only
2303         // when CMTask::do_marking_step() returns without setting the
2304         // has_aborted() flag that the marking step has completed.
2305         do {
2306           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2307           _task->do_marking_step(mark_step_duration_ms,
2308                                  false      /* do_termination */,
2309                                  _is_serial);
2310         } while (_task->has_aborted() && !_cm->has_overflown());
2311         _ref_counter = _ref_counter_limit;
2312       }
2313     } else {
2314       if (_cm->verbose_high()) {
2315          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2316       }
2317     }
2318   }
2319 };
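
// The control flow of G1CMKeepAliveAndDrainClosure, reduced to a sketch:
// process references one at a time, and every drain_interval references
// stop to drain the marking stacks, retrying do_marking_step() until it
// finishes without aborting or the global stack overflows. All of the
// functions below are placeholders for the CMTask/ConcurrentMark calls
// used above.
#if 0  // Illustrative sketch only -- not compiled.
bool have_more_references();     // placeholder: more discovered refs to visit?
void deal_with_next_reference(); // placeholder: CMTask::deal_with_reference()
void do_marking_step(double ms); // placeholder: CMTask::do_marking_step()
bool marking_step_aborted();     // placeholder: CMTask::has_aborted()
bool global_stack_overflown();   // placeholder: ConcurrentMark::has_overflown()

void keep_alive_and_drain_sketch(int drain_interval /* G1RefProcDrainInterval */) {
  int ref_counter = drain_interval;
  while (have_more_references() && !global_stack_overflown()) {
    deal_with_next_reference();  // pushes the referent and what it reaches
    if (--ref_counter == 0) {
      // Drain what we pushed. do_marking_step() may abort early (e.g. on
      // reaching its time target), so loop until a clean finish.
      do {
        do_marking_step(10.0 /* made-up time budget, ms */);
      } while (marking_step_aborted() && !global_stack_overflown());
      ref_counter = drain_interval;
    }
  }
}
#endif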
2320 
2321 // 'Drain' oop closure used by both serial and parallel reference processing.
2322 // Uses the CMTask associated with a given worker thread (for serial
2323 // reference processing the CMTask for worker 0 is used). Calls the
2324 // do_marking_step routine, with an unbelievably large timeout value,
2325 // to drain the marking data structures of the remaining entries
2326 // added by the 'keep alive' oop closure above.
2327 
2328 class G1CMDrainMarkingStackClosure: public VoidClosure {
2329   ConcurrentMark* _cm;
2330   CMTask*         _task;
2331   bool            _is_serial;
2332  public:
2333   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2334     _cm(cm), _task(task), _is_serial(is_serial) {
2335     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2336   }
2337 
2338   void do_void() {
2339     do {
2340       if (_cm->verbose_high()) {
2341         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2342                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2343       }
2344 
2345       // We call CMTask::do_marking_step() to completely drain the local
2346       // and global marking stacks of entries pushed by the 'keep alive'
2347       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2348       //
2349       // CMTask::do_marking_step() is called in a loop, which we'll exit
2350       // if there's nothing more to do (i.e. we've completely drained the
2351       // entries that were pushed as a result of applying the 'keep alive'
2352       // closure to the entries on the discovered ref lists) or we overflow
2353       // the global marking stack.
2354       //
2355       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2356       // flag while there may still be some work to do. (See the comment at
2357       // the beginning of CMTask::do_marking_step() for those conditions -
2358       // one of which is reaching the specified time target.) It is only
2359       // when CMTask::do_marking_step() returns without setting the
2360       // has_aborted() flag that the marking step has completed.
2361 
2362       _task->do_marking_step(1000000000.0 /* something very large */,
2363                              true         /* do_termination */,
2364                              _is_serial);
2365     } while (_task->has_aborted() && !_cm->has_overflown());
2366   }
2367 };
2368 
2369 // Implementation of AbstractRefProcTaskExecutor for parallel
2370 // reference processing at the end of G1 concurrent marking
2371 
2372 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2373 private:
2374   G1CollectedHeap* _g1h;
2375   ConcurrentMark*  _cm;
2376   WorkGang*        _workers;
2377   int              _active_workers;
2378 
2379 public:
2380   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2381                           ConcurrentMark* cm,
2382                           WorkGang* workers,
2383                           int n_workers) :
2384     _g1h(g1h), _cm(cm),
2385     _workers(workers), _active_workers(n_workers) { }
2386 
2387   // Executes the given task using concurrent marking worker threads.
2388   virtual void execute(ProcessTask& task);
2389   virtual void execute(EnqueueTask& task);
2390 };
2391 
2392 class G1CMRefProcTaskProxy: public AbstractGangTask {
2393   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2394   ProcessTask&     _proc_task;
2395   G1CollectedHeap* _g1h;
2396   ConcurrentMark*  _cm;
2397 
2398 public:
2399   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2400                        G1CollectedHeap* g1h,
2401                        ConcurrentMark* cm) :
2402     AbstractGangTask("Process reference objects in parallel"),
2403     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2404     ReferenceProcessor* rp = _g1h->ref_processor_cm();
2405     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2406   }
2407 
2408   virtual void work(uint worker_id) {
2409     ResourceMark rm;
2410     HandleMark hm;
2411     CMTask* task = _cm->task(worker_id);
2412     G1CMIsAliveClosure g1_is_alive(_g1h);
2413     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2414     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2415 
2416     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2417   }
2418 };
2419 
2420 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2421   assert(_workers != NULL, "Need parallel worker threads.");
2422   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2423 
2424   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2425 
2426   // We need to reset the concurrency level before each
2427   // proxy task execution, so that the termination protocol
2428   // and overflow handling in CMTask::do_marking_step() know
2429   // how many workers to wait for.
2430   _cm->set_concurrency(_active_workers);
2431   _g1h->set_par_threads(_active_workers);
2432   _workers->run_task(&proc_task_proxy);
2433   _g1h->set_par_threads(0);
2434 }
2435 
2436 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2437   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2438   EnqueueTask& _enq_task;
2439 
2440 public:
2441   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2442     AbstractGangTask("Enqueue reference objects in parallel"),
2443     _enq_task(enq_task) { }
2444 
2445   virtual void work(uint worker_id) {
2446     _enq_task.work(worker_id);
2447   }
2448 };
2449 
2450 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2451   assert(_workers != NULL, "Need parallel worker threads.");
2452   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2453 
2454   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2455 
2456   // Not strictly necessary but...
2457   //
2458   // We need to reset the concurrency level before each
2459   // proxy task execution, so that the termination protocol
2460   // and overflow handling in CMTask::do_marking_step() know
2461   // how many workers to wait for.
2462   _cm->set_concurrency(_active_workers);
2463   _g1h->set_par_threads(_active_workers);
2464   _workers->run_task(&enq_task_proxy);
2465   _g1h->set_par_threads(0);
2466 }
2467 
2468 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2469   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2470 }
2471 
2472 // Helper class to get rid of some boilerplate code.
2473 class G1RemarkGCTraceTime : public GCTraceTime {
2474   static bool doit_and_prepend(bool doit) {
2475     if (doit) {
2476       gclog_or_tty->put(' ');
2477     }
2478     return doit;
2479   }
2480 
2481  public:
2482   G1RemarkGCTraceTime(const char* title, bool doit)
2483     : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
2484         G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
2485   }
2486 };
2487 
2488 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2489   if (has_overflown()) {
2490     // Skip processing the discovered references if we have
2491     // overflown the global marking stack. Reference objects
2492     // only get discovered once, so it is OK not to
2493     // de-populate the discovered reference lists. We could have done
2494     // so, but the only benefit would be that, when marking restarts,
2495     // fewer reference objects are discovered.
2496     return;
2497   }
2498 
2499   ResourceMark rm;
2500   HandleMark   hm;
2501 
2502   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2503 
2504   // Is alive closure.
2505   G1CMIsAliveClosure g1_is_alive(g1h);
2506 
2507   // Inner scope to exclude the cleaning of the string and symbol
2508   // tables from the displayed time.
2509   {
2510     if (G1Log::finer()) {
2511       gclog_or_tty->put(' ');
2512     }
2513     GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());
2514 
2515     ReferenceProcessor* rp = g1h->ref_processor_cm();
2516 
2517     // See the comment in G1CollectedHeap::ref_processing_init()
2518     // about how reference processing currently works in G1.
2519 
2520     // Set the soft reference policy
2521     rp->setup_policy(clear_all_soft_refs);
2522     assert(_markStack.isEmpty(), "mark stack should be empty");
2523 
2524     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2525     // in serial reference processing. Note these closures are also
2526     // used for serially processing (by the current thread) the
2527     // JNI references during parallel reference processing.
2528     //
2529     // These closures do not need to synchronize with the worker
2530     // threads involved in parallel reference processing as these
2531     // instances are executed serially by the current thread (i.e.
2532     // reference processing is not multi-threaded and is thus
2533     // performed by the current thread instead of a gang worker).
2534     //
2535     // The gang tasks involved in parallel reference processing create
2536     // their own instances of these closures, which do their own
2537     // synchronization among themselves.
2538     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2539     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2540 
2541     // We need at least one active thread. If reference processing
2542     // is not multi-threaded we use the current (VMThread) thread,
2543     // otherwise we use the work gang from the G1CollectedHeap and
2544     // we utilize all the worker threads we can.
2545     bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2546     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2547     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2548 
2549     // Parallel processing task executor.
2550     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2551                                               g1h->workers(), active_workers);
2552     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2553 
2554     // Set the concurrency level. The phase was already set prior to
2555     // executing the remark task.
2556     set_concurrency(active_workers);
2557 
2558     // Set the degree of MT processing here.  If the discovery was done MT,
2559     // the number of threads involved during discovery could differ from
2560     // the number of active workers.  This is OK as long as the discovered
2561     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2562     rp->set_active_mt_degree(active_workers);
2563 
2564     // Process the weak references.
2565     const ReferenceProcessorStats& stats =
2566         rp->process_discovered_references(&g1_is_alive,
2567                                           &g1_keep_alive,
2568                                           &g1_drain_mark_stack,
2569                                           executor,
2570                                           g1h->gc_timer_cm(),
2571                                           concurrent_gc_id());
2572     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2573 
2574     // The do_oop work routines of the keep_alive and drain_marking_stack
2575     // oop closures will set the has_overflown flag if we overflow the
2576     // global marking stack.
2577 
2578     assert(_markStack.overflow() || _markStack.isEmpty(),
2579            "mark stack should be empty (unless it overflowed)");
2580 
2581     if (_markStack.overflow()) {
2582       // This should have been done already when we tried to push an
2583       // entry on to the global mark stack. But let's do it again.
2584       set_has_overflown();
2585     }
2586 
2587     assert(rp->num_q() == active_workers, "Number of reference queues should match the active worker count");
2588 
2589     rp->enqueue_discovered_references(executor);
2590 
2591     rp->verify_no_references_recorded();
2592     assert(!rp->discovery_enabled(), "Post condition");
2593   }
2594 
2595   if (has_overflown()) {
2596     // We cannot trust g1_is_alive if the marking stack overflowed
2597     return;
2598   }
2599 
2600   assert(_markStack.isEmpty(), "Marking should have completed");
2601 
2602   // Unload Klasses, String, Symbols, Code Cache, etc.
2603   {
2604     G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
2605 
2606     if (ClassUnloadingWithConcurrentMark) {
2607       // Cleaning of klasses depends on correct information from MetadataOnStackMark. The CodeCache::mark_on_stack
2608       // part is too slow to be done serially, so it is handled during the weakRefsWorkParallelPart phase.
2609       // Defer the cleaning until we have complete on_stack data.
2610       MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);
2611 
2612       bool purged_classes;
2613 
2614       {
2615         G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
2616         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2617       }
2618 
2619       {
2620         G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
2621         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2622       }
2623 
2624       {
2625         G1RemarkGCTraceTime trace("Deallocate Metadata", G1Log::finest());
2626         ClassLoaderDataGraph::free_deallocate_lists();
2627       }
2628     }
2629 
2630     if (G1StringDedup::is_enabled()) {
2631       G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
2632       G1StringDedup::unlink(&g1_is_alive);
2633     }
2634   }
2635 }
2636 
2637 void ConcurrentMark::swapMarkBitMaps() {
2638   CMBitMapRO* temp = _prevMarkBitMap;
2639   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2640   _nextMarkBitMap  = (CMBitMap*)  temp;
2641 }
2642 
2643 // Closure for marking entries in SATB buffers.
2644 class CMSATBBufferClosure : public SATBBufferClosure {
2645 private:
2646   CMTask* _task;
2647   G1CollectedHeap* _g1h;
2648 
2649   // This is very similar to CMTask::deal_with_reference, but with
2650   // more relaxed requirements for the argument, so this must be more
2651   // circumspect about treating the argument as an object.
2652   void do_entry(void* entry) const {
2653     _task->increment_refs_reached();
2654     HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
2655     if (entry < hr->next_top_at_mark_start()) {
2656       // Until we get here, we don't know whether entry refers to a valid
2657       // object; it could instead have been a stale reference.
2658       oop obj = static_cast<oop>(entry);
2659       assert(obj->is_oop(true /* ignore mark word */),
2660              err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
2661       _task->make_reference_grey(obj, hr);
2662     }
2663   }
2664 
2665 public:
2666   CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
2667     : _task(task), _g1h(g1h) { }
2668 
2669   virtual void do_buffer(void** buffer, size_t size) {
2670     for (size_t i = 0; i < size; ++i) {
2671       do_entry(buffer[i]);
2672     }
2673   }
2674 };
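
// The filter in do_entry() above, as a standalone predicate: an SATB
// entry is treated as an object only if it lies below its region's NTAMS.
// Entries at or above NTAMS fall in the interval of implicitly live,
// allocated-since-marking objects and are skipped -- and, as noted above,
// such an entry may not even refer to a valid object. Plain pointers
// stand in for the HeapRegion lookup.
#if 0  // Illustrative sketch only -- not compiled.
static bool satb_entry_needs_marking(const void* entry,
                                     const void* region_ntams) {
  return entry < region_ntams;
}
#endif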
2675 
2676 class G1RemarkThreadsClosure : public ThreadClosure {
2677   CMSATBBufferClosure _cm_satb_cl;
2678   G1CMOopClosure _cm_cl;
2679   MarkingCodeBlobClosure _code_cl;
2680   int _thread_parity;
2681   bool _is_par;
2682 
2683  public:
2684   G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
2685     _cm_satb_cl(task, g1h),
2686     _cm_cl(g1h, g1h->concurrent_mark(), task),
2687     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2688     _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
2689 
2690   void do_thread(Thread* thread) {
2691     if (thread->is_Java_thread()) {
2692       if (thread->claim_oops_do(_is_par, _thread_parity)) {
2693         JavaThread* jt = (JavaThread*)thread;
2694 
2695         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2696         // however, oops reachable from nmethods have very complex lifecycles:
2697         // * Alive if on the stack of an executing method
2698         // * Weakly reachable otherwise
2699         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver,
2700         // should be kept live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2701         jt->nmethods_do(&_code_cl);
2702 
2703         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
2704       }
2705     } else if (thread->is_VM_thread()) {
2706       if (thread->claim_oops_do(_is_par, _thread_parity)) {
2707         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
2708       }
2709     }
2710   }
2711 };
2712 
2713 class CMRemarkTask: public AbstractGangTask {
2714 private:
2715   ConcurrentMark* _cm;
2716   bool            _is_serial;
2717 public:
2718   void work(uint worker_id) {
2719     // Since all available tasks are actually started, we should
2720     // only proceed if we're supposed to be active.
2721     if (worker_id < _cm->active_tasks()) {
2722       CMTask* task = _cm->task(worker_id);
2723       task->record_start_time();
2724       {
2725         ResourceMark rm;
2726         HandleMark hm;
2727 
2728         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
2729         Threads::threads_do(&threads_f);
2730       }
2731 
2732       do {
2733         task->do_marking_step(1000000000.0 /* something very large */,
2734                               true         /* do_termination       */,
2735                               _is_serial);
2736       } while (task->has_aborted() && !_cm->has_overflown());
2737       // If we overflow, then we do not want to restart. We instead
2738       // want to abort remark and do concurrent marking again.
2739       task->record_end_time();
2740     }
2741   }
2742 
2743   CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2744     AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2745     _cm->terminator()->reset_for_reuse(active_workers);
2746   }
2747 };
2748 
2749 void ConcurrentMark::checkpointRootsFinalWork() {
2750   ResourceMark rm;
2751   HandleMark   hm;
2752   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2753 
2754   G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
2755 
2756   g1h->ensure_parsability(false);
2757 
2758   if (G1CollectedHeap::use_parallel_gc_threads()) {
2759     G1CollectedHeap::StrongRootsScope srs(g1h);
2760     // This is remark, so we'll use up all active threads.
2761     uint active_workers = g1h->workers()->active_workers();
2762     if (active_workers == 0) {
2763       assert(active_workers > 0, "Should have been set earlier");
2764       active_workers = (uint) ParallelGCThreads;
2765       g1h->workers()->set_active_workers(active_workers);
2766     }
2767     set_concurrency_and_phase(active_workers, false /* concurrent */);
2768     // Leave _parallel_marking_threads at its
2769     // value originally calculated in the ConcurrentMark
2770     // constructor and pass values of the active workers
2771     // through the gang in the task.
2772 
2773     CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2774     // We will start all available threads, even if we decide that the
2775     // active_workers will be fewer. The extra ones will just bail out
2776     // immediately.
2777     g1h->set_par_threads(active_workers);
2778     g1h->workers()->run_task(&remarkTask);
2779     g1h->set_par_threads(0);
2780   } else {
2781     G1CollectedHeap::StrongRootsScope srs(g1h);
2782     uint active_workers = 1;
2783     set_concurrency_and_phase(active_workers, false /* concurrent */);
2784 
2785     // Note - if there's no work gang then the VMThread will be
2786     // the thread to execute the remark - serially. We have
2787     // to pass true for the is_serial parameter so that
2788     // CMTask::do_marking_step() doesn't enter the sync
2789     // barriers in the event of an overflow. Entering them would
2790     // trigger an assert, because the current thread is not a
2791     // concurrent GC thread.
2792     CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/);
2793     remarkTask.work(0);
2794   }
2795   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2796   guarantee(has_overflown() ||
2797             satb_mq_set.completed_buffers_num() == 0,
2798             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2799                     BOOL_TO_STR(has_overflown()),
2800                     satb_mq_set.completed_buffers_num()));
2801 
2802   print_stats();
2803 }
2804 
2805 #ifndef PRODUCT
2806 
2807 class PrintReachableOopClosure: public OopClosure {
2808 private:
2809   G1CollectedHeap* _g1h;
2810   outputStream*    _out;
2811   VerifyOption     _vo;
2812   bool             _all;
2813 
2814 public:
2815   PrintReachableOopClosure(outputStream* out,
2816                            VerifyOption  vo,
2817                            bool          all) :
2818     _g1h(G1CollectedHeap::heap()),
2819     _out(out), _vo(vo), _all(all) { }
2820 
2821   void do_oop(narrowOop* p) { do_oop_work(p); }
2822   void do_oop(      oop* p) { do_oop_work(p); }
2823 
2824   template <class T> void do_oop_work(T* p) {
2825     oop         obj = oopDesc::load_decode_heap_oop(p);
2826     const char* str = NULL;
2827     const char* str2 = "";
2828 
2829     if (obj == NULL) {
2830       str = "";
2831     } else if (!_g1h->is_in_g1_reserved(obj)) {
2832       str = " O";
2833     } else {
2834       HeapRegion* hr  = _g1h->heap_region_containing(obj);
2835       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2836       bool marked = _g1h->is_marked(obj, _vo);
2837 
2838       if (over_tams) {
2839         str = " >";
2840         if (marked) {
2841           str2 = " AND MARKED";
2842         }
2843       } else if (marked) {
2844         str = " M";
2845       } else {
2846         str = " NOT";
2847       }
2848     }
2849 
2850     _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
2851                    p2i(p), p2i((void*) obj), str, str2);
2852   }
2853 };
2854 
2855 class PrintReachableObjectClosure : public ObjectClosure {
2856 private:
2857   G1CollectedHeap* _g1h;
2858   outputStream*    _out;
2859   VerifyOption     _vo;
2860   bool             _all;
2861   HeapRegion*      _hr;
2862 
2863 public:
2864   PrintReachableObjectClosure(outputStream* out,
2865                               VerifyOption  vo,
2866                               bool          all,
2867                               HeapRegion*   hr) :
2868     _g1h(G1CollectedHeap::heap()),
2869     _out(out), _vo(vo), _all(all), _hr(hr) { }
2870 
2871   void do_object(oop o) {
2872     bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2873     bool marked = _g1h->is_marked(o, _vo);
2874     bool print_it = _all || over_tams || marked;
2875 
2876     if (print_it) {
2877       _out->print_cr(" "PTR_FORMAT"%s",
2878                      p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : "");
2879       PrintReachableOopClosure oopCl(_out, _vo, _all);
2880       o->oop_iterate_no_header(&oopCl);
2881     }
2882   }
2883 };
2884 
2885 class PrintReachableRegionClosure : public HeapRegionClosure {
2886 private:
2887   G1CollectedHeap* _g1h;
2888   outputStream*    _out;
2889   VerifyOption     _vo;
2890   bool             _all;
2891 
2892 public:
2893   bool doHeapRegion(HeapRegion* hr) {
2894     HeapWord* b = hr->bottom();
2895     HeapWord* e = hr->end();
2896     HeapWord* t = hr->top();
2897     HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2898     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2899                    "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p));
2900     _out->cr();
2901 
2902     HeapWord* from = b;
2903     HeapWord* to   = t;
2904 
2905     if (to > from) {
2906       _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to));
2907       _out->cr();
2908       PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2909       hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2910       _out->cr();
2911     }
2912 
2913     return false;
2914   }
2915 
2916   PrintReachableRegionClosure(outputStream* out,
2917                               VerifyOption  vo,
2918                               bool          all) :
2919     _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2920 };
2921 
2922 void ConcurrentMark::print_reachable(const char* str,
2923                                      VerifyOption vo,
2924                                      bool all) {
2925   gclog_or_tty->cr();
2926   gclog_or_tty->print_cr("== Doing heap dump... ");
2927 
2928   if (G1PrintReachableBaseFile == NULL) {
2929     gclog_or_tty->print_cr("  #### error: no base file defined");
2930     return;
2931   }
2932 
2933   if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2934       (JVM_MAXPATHLEN - 1)) {
2935     gclog_or_tty->print_cr("  #### error: file name too long");
2936     return;
2937   }
2938 
2939   char file_name[JVM_MAXPATHLEN];
2940   sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2941   gclog_or_tty->print_cr("  dumping to file %s", file_name);
2942 
2943   fileStream fout(file_name);
2944   if (!fout.is_open()) {
2945     gclog_or_tty->print_cr("  #### error: could not open file");
2946     return;
2947   }
2948 
2949   outputStream* out = &fout;
2950   out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2951   out->cr();
2952 
2953   out->print_cr("--- ITERATING OVER REGIONS");
2954   out->cr();
2955   PrintReachableRegionClosure rcl(out, vo, all);
2956   _g1h->heap_region_iterate(&rcl);
2957   out->cr();
2958 
2959   gclog_or_tty->print_cr("  done");
2960   gclog_or_tty->flush();
2961 }
2962 
2963 #endif // PRODUCT
2964 
2965 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2966   // Note we are overriding the read-only view of the prev map here, via
2967   // the cast.
2968   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2969 }
2970 
2971 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2972   _nextMarkBitMap->clearRange(mr);
2973 }
2974 
2975 HeapRegion*
2976 ConcurrentMark::claim_region(uint worker_id) {
2977   // "checkpoint" the finger
2978   HeapWord* finger = _finger;
2979 
2980   // _heap_end will not change underneath our feet; it only changes at
2981   // yield points.
2982   while (finger < _heap_end) {
2983     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2984 
2985     // Note on how this code handles humongous regions. In the
2986     // normal case the finger will reach the start of a "starts
2987     // humongous" (SH) region. Its end will either be the end of the
2988     // last "continues humongous" (CH) region in the sequence, or the
2989     // standard end of the SH region (if the SH is the only region in
2990     // the sequence). That way claim_region() will skip over the CH
2991     // regions. However, there is a subtle race between a CM thread
2992     // executing this method and a mutator thread doing a humongous
2993     // object allocation. The two are not mutually exclusive as the CM
2994     // thread does not need to hold the Heap_lock when it gets
2995     // here. So there is a chance that claim_region() will come across
2996     // a free region that's in the process of becoming a SH or a CH
2997     // region. In the former case, it will either
2998     //   a) Miss the update to the region's end, in which case it will
2999     //      visit every subsequent CH region, will find their bitmaps
3000     //      empty, and do nothing, or
3001     //   b) Will observe the update of the region's end (in which case
3002     //      it will skip the subsequent CH regions).
3003     // If it comes across a region that suddenly becomes CH, the
3004     // scenario will be similar to b). So, the race between
3005     // claim_region() and a humongous object allocation might force us
3006     // to do a bit of unnecessary work (due to some unnecessary bitmap
3007     // iterations) but it should not introduce any correctness issues.
3008     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
3009 
3010     // heap_region_containing_raw() above may return NULL, as we always scan
3011     // and claim up to the end of the heap. In this case, just jump to the next region.
3012     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
3013 
3014     // Is the gap between reading the finger and doing the CAS too long?
3015     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
3016     if (res == finger && curr_region != NULL) {
3017       // we succeeded
3018       HeapWord*   bottom        = curr_region->bottom();
3019       HeapWord*   limit         = curr_region->next_top_at_mark_start();
3020 
3021       if (verbose_low()) {
3022         gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
3023                                "["PTR_FORMAT", "PTR_FORMAT"), "
3024                                "limit = "PTR_FORMAT,
3025                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
3026       }
3027 
3028       // Notice that _finger == end cannot be guaranteed here, since
3029       // someone else might have moved the finger even further.
3030       assert(_finger >= end, "the finger should have moved forward");
3031 
3032       if (verbose_low()) {
3033         gclog_or_tty->print_cr("[%u] we were successful with region = "
3034                                PTR_FORMAT, worker_id, p2i(curr_region));
3035       }
3036 
3037       if (limit > bottom) {
3038         if (verbose_low()) {
3039           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
3040                                  "returning it ", worker_id, p2i(curr_region));
3041         }
3042         return curr_region;
3043       } else {
3044         assert(limit == bottom,
3045                "the region limit should be at bottom");
3046         if (verbose_low()) {
3047           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
3048                                  "returning NULL", worker_id, p2i(curr_region));
3049         }
3050         // we return NULL and the caller should try calling
3051         // claim_region() again.
3052         return NULL;
3053       }
3054     } else {
3055       assert(_finger > finger, "the finger should have moved forward");
3056       if (verbose_low()) {
3057         if (curr_region == NULL) {
3058           gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
3059                                  "global finger = "PTR_FORMAT", "
3060                                  "our finger = "PTR_FORMAT,
3061                                  worker_id, p2i(_finger), p2i(finger));
3062         } else {
3063           gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
3064                                  "global finger = "PTR_FORMAT", "
3065                                  "our finger = "PTR_FORMAT,
3066                                  worker_id, p2i(_finger), p2i(finger));
3067         }
3068       }
3069 
3070       // read it again
3071       finger = _finger;
3072     }
3073   }
3074 
3075   return NULL;
3076 }
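
     // A hedged sketch of the intended calling pattern (the real caller is
     // CMTask::do_marking_step(), later in this file). A NULL result may
     // simply mean "claimed region was empty, try again", so callers retry
     // until the global finger reaches the end of the heap ("heap_end"
     // below stands in for ConcurrentMark's _heap_end):
     //
     //   HeapRegion* hr = NULL;
     //   while (hr == NULL && _cm->finger() < heap_end) {
     //     hr = _cm->claim_region(worker_id);
     //   }
     //   if (hr != NULL) { /* scan its bitmap over [bottom, NTAMS) */ }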
3077 
3078 #ifndef PRODUCT
3079 enum VerifyNoCSetOopsPhase {
3080   VerifyNoCSetOopsStack,
3081   VerifyNoCSetOopsQueues
3082 };
3083 
3084 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
3085 private:
3086   G1CollectedHeap* _g1h;
3087   VerifyNoCSetOopsPhase _phase;
3088   int _info;
3089 
3090   const char* phase_str() {
3091     switch (_phase) {
3092     case VerifyNoCSetOopsStack:         return "Stack";
3093     case VerifyNoCSetOopsQueues:        return "Queue";
3094     default:                            ShouldNotReachHere();
3095     }
3096     return NULL;
3097   }
3098 
3099   void do_object_work(oop obj) {
3100     guarantee(!_g1h->obj_in_cs(obj),
3101               err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
3102                       p2i((void*) obj), phase_str(), _info));
3103   }
3104 
3105 public:
3106   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
3107 
3108   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
3109     _phase = phase;
3110     _info = info;
3111   }
3112 
3113   virtual void do_oop(oop* p) {
3114     oop obj = oopDesc::load_decode_heap_oop(p);
3115     do_object_work(obj);
3116   }
3117 
3118   virtual void do_oop(narrowOop* p) {
3119     // We should not come across narrow oops while scanning marking
3120     // stacks
3121     ShouldNotReachHere();
3122   }
3123 
3124   virtual void do_object(oop obj) {
3125     do_object_work(obj);
3126   }
3127 };
3128 
3129 void ConcurrentMark::verify_no_cset_oops() {
3130   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
3131   if (!G1CollectedHeap::heap()->mark_in_progress()) {
3132     return;
3133   }
3134 
3135   VerifyNoCSetOopsClosure cl;
3136 
3137   // Verify entries on the global mark stack
3138   cl.set_phase(VerifyNoCSetOopsStack);
3139   _markStack.oops_do(&cl);
3140 
3141   // Verify entries on the task queues
3142   for (uint i = 0; i < _max_worker_id; i += 1) {
3143     cl.set_phase(VerifyNoCSetOopsQueues, i);
3144     CMTaskQueue* queue = _task_queues->queue(i);
3145     queue->oops_do(&cl);
3146   }
3147 
3148   // Verify the global finger
3149   HeapWord* global_finger = finger();
3150   if (global_finger != NULL && global_finger < _heap_end) {
3151     // The global finger always points to a heap region boundary. We
3152     // use heap_region_containing_raw() to get the containing region
3153     // given that the global finger could be pointing to a free region
3154     // which subsequently becomes continues humongous. If that
3155     // happens, heap_region_containing() would return the starts
3156     // humongous region containing the finger, whose bottom() may differ
3157     // from the finger, and the check below would no longer hold.
3158     // Since we always iterate over all regions, we might get a NULL HeapRegion
3159     // here.
3160     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
3161     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
3162               err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
3163                       p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
3164   }
3165 
3166   // Verify the task fingers
3167   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
3168   for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
3169     CMTask* task = _tasks[i];
3170     HeapWord* task_finger = task->finger();
3171     if (task_finger != NULL && task_finger < _heap_end) {
3172       // See above note on the global finger verification.
3173       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3174       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
3175                 !task_hr->in_collection_set(),
3176                 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
3177                         p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
3178     }
3179   }
3180 }
3181 #endif // PRODUCT
3182 
3183 // Aggregate the counting data that was constructed concurrently
3184 // with marking.
3185 class AggregateCountDataHRClosure: public HeapRegionClosure {
3186   G1CollectedHeap* _g1h;
3187   ConcurrentMark* _cm;
3188   CardTableModRefBS* _ct_bs;
3189   BitMap* _cm_card_bm;
3190   uint _max_worker_id;
3191 
3192  public:
3193   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3194                               BitMap* cm_card_bm,
3195                               uint max_worker_id) :
3196     _g1h(g1h), _cm(g1h->concurrent_mark()),
3197     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3198     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3199 
3200   bool doHeapRegion(HeapRegion* hr) {
3201     if (hr->continuesHumongous()) {
3202       // We will ignore these here and process them when their
3203       // associated "starts humongous" region is processed.
3204       // Note that we cannot rely on their associated
3205       // "starts humongous" region to have their bit set to 1
3206       // since, due to the region chunking in the parallel region
3207       // iteration, a "continues humongous" region might be visited
3208       // before its associated "starts humongous".
3209       return false;
3210     }
3211 
3212     HeapWord* start = hr->bottom();
3213     HeapWord* limit = hr->next_top_at_mark_start();
3214     HeapWord* end = hr->end();
3215 
3216     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3217            err_msg("Preconditions not met - "
3218                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3219                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
3220                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
3221 
3222     assert(hr->next_marked_bytes() == 0, "Precondition");
3223 
3224     if (start == limit) {
3225       // NTAMS of this region has not been set so nothing to do.
3226       return false;
3227     }
3228 
3229     // 'start' should be in the heap.
3230     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3231     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3232     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3233 
3234     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3235     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3236     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3237 
3238     // If ntams is not card aligned then we bump the card bitmap index
3239     // for limit so that we get all the cards spanned by
3240     // the object ending at ntams.
3241     // Note: if this is the last region in the heap then ntams
3242     // could be actually just beyond the end of the heap;
3243     // limit_idx will then correspond to a (non-existent) card
3244     // that is also outside the heap.
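     // Illustrative example (hypothetical indices; a card covers
     // CardTableModRefBS::card_size bytes, typically 512): if ntams falls
     // part-way into the card with index 104, card_bitmap_index_for(limit)
     // yields 104, and the bump below moves the exclusive upper bound to
     // 105 so that the partially covered card is still included.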
3245     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3246       limit_idx += 1;
3247     }
3248 
3249     assert(limit_idx <= end_idx, "or else use atomics");
3250 
3251     // Aggregate the "stripe" in the count data associated with hr.
3252     uint hrm_index = hr->hrm_index();
3253     size_t marked_bytes = 0;
3254 
3255     for (uint i = 0; i < _max_worker_id; i += 1) {
3256       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3257       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3258 
3259       // Fetch the marked_bytes in this region for task i and
3260       // add it to the running total for this region.
3261       marked_bytes += marked_bytes_array[hrm_index];
3262 
3263       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3264       // into the global card bitmap.
3265       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3266 
3267       while (scan_idx < limit_idx) {
3268         assert(task_card_bm->at(scan_idx) == true, "should be");
3269         _cm_card_bm->set_bit(scan_idx);
3270         assert(_cm_card_bm->at(scan_idx) == true, "should be");
3271 
3272         // BitMap::get_next_one_offset() can handle the case when
3273         // its left_offset parameter is greater than its right_offset
3274         // parameter. It does, however, have an early exit if
3275         // left_offset == right_offset. So let's limit the value
3276         // passed in for left offset here.
3277         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3278         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3279       }
3280     }
3281 
3282     // Update the marked bytes for this region.
3283     hr->add_to_marked_bytes(marked_bytes);
3284 
3285     // Next heap region
3286     return false;
3287   }
3288 };
3289 
3290 class G1AggregateCountDataTask: public AbstractGangTask {
3291 protected:
3292   G1CollectedHeap* _g1h;
3293   ConcurrentMark* _cm;
3294   BitMap* _cm_card_bm;
3295   uint _max_worker_id;
3296   int _active_workers;
3297 
3298 public:
3299   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3300                            ConcurrentMark* cm,
3301                            BitMap* cm_card_bm,
3302                            uint max_worker_id,
3303                            int n_workers) :
3304     AbstractGangTask("Count Aggregation"),
3305     _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3306     _max_worker_id(max_worker_id),
3307     _active_workers(n_workers) { }
3308 
3309   void work(uint worker_id) {
3310     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3311 
3312     if (G1CollectedHeap::use_parallel_gc_threads()) {
3313       _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
3314                                             _active_workers,
3315                                             HeapRegion::AggregateCountClaimValue);
3316     } else {
3317       _g1h->heap_region_iterate(&cl);
3318     }
3319   }
3320 };
3321 
3322 
3323 void ConcurrentMark::aggregate_count_data() {
3324   int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3325                         _g1h->workers()->active_workers() :
3326                         1);
3327 
3328   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3329                                            _max_worker_id, n_workers);
3330 
3331   if (G1CollectedHeap::use_parallel_gc_threads()) {
3332     assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3333            "sanity check");
3334     _g1h->set_par_threads(n_workers);
3335     _g1h->workers()->run_task(&g1_par_agg_task);
3336     _g1h->set_par_threads(0);
3337 
3338     assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
3339            "sanity check");
3340     _g1h->reset_heap_region_claim_values();
3341   } else {
3342     g1_par_agg_task.work(0);
3343   }
3344 }
3345 
3346 // Clear the per-worker arrays used to store the per-region counting data
3347 void ConcurrentMark::clear_all_count_data() {
3348   // Clear the global card bitmap - it will be filled during
3349   // liveness count aggregation (during remark) and the
3350   // final counting task.
3351   _card_bm.clear();
3352 
3353   // Clear the global region bitmap - it will be filled as part
3354   // of the final counting task.
3355   _region_bm.clear();
3356 
3357   uint max_regions = _g1h->max_regions();
3358   assert(_max_worker_id > 0, "uninitialized");
3359 
3360   for (uint i = 0; i < _max_worker_id; i += 1) {
3361     BitMap* task_card_bm = count_card_bitmap_for(i);
3362     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3363 
3364     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3365     assert(marked_bytes_array != NULL, "uninitialized");
3366 
3367     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3368     task_card_bm->clear();
3369   }
3370 }
3371 
3372 void ConcurrentMark::print_stats() {
3373   if (verbose_stats()) {
3374     gclog_or_tty->print_cr("---------------------------------------------------------------------");
3375     for (size_t i = 0; i < _active_tasks; ++i) {
3376       _tasks[i]->print_stats();
3377       gclog_or_tty->print_cr("---------------------------------------------------------------------");
3378     }
3379   }
3380 }
3381 
3382 // abandon current marking iteration due to a Full GC
3383 void ConcurrentMark::abort() {
3384   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
3385   // concurrent bitmap clearing.
3386   _nextMarkBitMap->clearAll();
3387 
3388   // Note we cannot clear the previous marking bitmap here
3389   // since VerifyDuringGC verifies the objects marked during
3390   // a full GC against the previous bitmap.
3391 
3392   // Clear the liveness counting data
3393   clear_all_count_data();
3394   // Empty mark stack
3395   reset_marking_state();
3396   for (uint i = 0; i < _max_worker_id; ++i) {
3397     _tasks[i]->clear_region_fields();
3398   }
3399   _first_overflow_barrier_sync.abort();
3400   _second_overflow_barrier_sync.abort();
3401   const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
3402   if (!gc_id.is_undefined()) {
3403     // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
3404     // to detect that it was aborted. Only keep track of the GC id of the first aborted GC.
3405     _aborted_gc_id = gc_id;
3406   }
3407   _has_aborted = true;
3408 
3409   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3410   satb_mq_set.abandon_partial_marking();
3411   // This can be called either during or outside marking; we'll read
3412   // the expected_active value from the SATB queue set.
3413   satb_mq_set.set_active_all_threads(
3414                                  false, /* new active value */
3415                                  satb_mq_set.is_active() /* expected_active */);
3416 
3417   _g1h->trace_heap_after_concurrent_cycle();
3418   _g1h->register_concurrent_cycle_end();
3419 }
3420 
3421 const GCId& ConcurrentMark::concurrent_gc_id() {
3422   if (has_aborted()) {
3423     return _aborted_gc_id;
3424   }
3425   return _g1h->gc_tracer_cm()->gc_id();
3426 }
3427 
3428 static void print_ms_time_info(const char* prefix, const char* name,
3429                                NumberSeq& ns) {
3430   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3431                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3432   if (ns.num() > 0) {
3433     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
3434                            prefix, ns.sd(), ns.maximum());
3435   }
3436 }
3437 
3438 void ConcurrentMark::print_summary_info() {
3439   gclog_or_tty->print_cr(" Concurrent marking:");
3440   print_ms_time_info("  ", "init marks", _init_times);
3441   print_ms_time_info("  ", "remarks", _remark_times);
3442   {
3443     print_ms_time_info("     ", "final marks", _remark_mark_times);
3444     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
3446   }
3447   print_ms_time_info("  ", "cleanups", _cleanup_times);
3448   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
3449                          _total_counting_time,
3450                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3451                           (double)_cleanup_times.num()
3452                          : 0.0));
3453   if (G1ScrubRemSets) {
3454     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
3455                            _total_rs_scrub_time,
3456                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3457                             (double)_cleanup_times.num()
3458                            : 0.0));
3459   }
3460   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
3461                          (_init_times.sum() + _remark_times.sum() +
3462                           _cleanup_times.sum())/1000.0);
3463   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
3464                 "(%8.2f s marking).",
3465                 cmThread()->vtime_accum(),
3466                 cmThread()->vtime_mark_accum());
3467 }
3468 
3469 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3470   if (use_parallel_marking_threads()) {
3471     _parallel_workers->print_worker_threads_on(st);
3472   }
3473 }
3474 
3475 void ConcurrentMark::print_on_error(outputStream* st) const {
3476   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3477       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3478   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3479   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3480 }
3481 
3482 // We take a break if someone is trying to stop the world.
3483 bool ConcurrentMark::do_yield_check(uint worker_id) {
3484   if (SuspendibleThreadSet::should_yield()) {
3485     if (worker_id == 0) {
3486       _g1h->g1_policy()->record_concurrent_pause();
3487     }
3488     SuspendibleThreadSet::yield();
3489     return true;
3490   } else {
3491     return false;
3492   }
3493 }
3494 
3495 #ifndef PRODUCT
3496 // for debugging purposes
3497 void ConcurrentMark::print_finger() {
3498   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3499                          p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3500   for (uint i = 0; i < _max_worker_id; ++i) {
3501     gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3502   }
3503   gclog_or_tty->cr();
3504 }
3505 #endif
3506 
3507 template<bool scan>
3508 inline void CMTask::process_grey_object(oop obj) {
3509   assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
3510   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3511 
3512   if (_cm->verbose_high()) {
3513     gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
3514                            _worker_id, p2i((void*) obj));
3515   }
3516 
3517   size_t obj_size = obj->size();
3518   _words_scanned += obj_size;
3519 
3520   if (scan) {
3521     obj->oop_iterate(_cm_oop_closure);
3522   }
3523   statsOnly( ++_objs_scanned );
3524   check_limits();
3525 }
3526 
3527 template void CMTask::process_grey_object<true>(oop);
3528 template void CMTask::process_grey_object<false>(oop);
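
// A note on the two instantiations above: process_grey_object<false> skips
// the oop_iterate() call, so it is safe only for objects that contain no
// references to follow; the assert at the top of the method restricts this
// case to typeArrays.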
3529 
3530 // Closure for iteration over bitmaps
3531 class CMBitMapClosure : public BitMapClosure {
3532 private:
3533   // the bitmap that is being iterated over
3534   CMBitMap*                   _nextMarkBitMap;
3535   ConcurrentMark*             _cm;
3536   CMTask*                     _task;
3537 
3538 public:
3539   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3540     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
3541 
3542   bool do_bit(size_t offset) {
3543     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3544     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3545     assert(addr < _cm->finger(), "invariant");
3546 
3547     statsOnly( _task->increase_objs_found_on_bitmap() );
3548     assert(addr >= _task->finger(), "invariant");
3549 
3550     // We move this task's local finger along.
3551     _task->move_finger_to(addr);
3552 
3553     _task->scan_object(oop(addr));
3554     // we only partially drain the local queue and global stack
3555     _task->drain_local_queue(true);
3556     _task->drain_global_stack(true);
3557 
3558     // if the has_aborted flag has been raised, we need to bail out of
3559     // the iteration
3560     return !_task->has_aborted();
3561   }
3562 };
3563 
3564 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3565                                ConcurrentMark* cm,
3566                                CMTask* task)
3567   : _g1h(g1h), _cm(cm), _task(task) {
3568   assert(_ref_processor == NULL, "should be initialized to NULL");
3569 
3570   if (G1UseConcMarkReferenceProcessing) {
3571     _ref_processor = g1h->ref_processor_cm();
3572     assert(_ref_processor != NULL, "should not be NULL");
3573   }
3574 }
3575 
3576 void CMTask::setup_for_region(HeapRegion* hr) {
3577   assert(hr != NULL,
3578         "claim_region() should have filtered out NULL regions");
3579   assert(!hr->continuesHumongous(),
3580         "claim_region() should have filtered out continues humongous regions");
3581 
3582   if (_cm->verbose_low()) {
3583     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3584                            _worker_id, p2i(hr));
3585   }
3586 
3587   _curr_region  = hr;
3588   _finger       = hr->bottom();
3589   update_region_limit();
3590 }
3591 
3592 void CMTask::update_region_limit() {
3593   HeapRegion* hr            = _curr_region;
3594   HeapWord* bottom          = hr->bottom();
3595   HeapWord* limit           = hr->next_top_at_mark_start();
3596 
3597   if (limit == bottom) {
3598     if (_cm->verbose_low()) {
3599       gclog_or_tty->print_cr("[%u] found an empty region "
3600                              "["PTR_FORMAT", "PTR_FORMAT")",
3601                              _worker_id, p2i(bottom), p2i(limit));
3602     }
3603     // The region was collected underneath our feet.
3604     // We set the finger to bottom to ensure that the bitmap
3605     // iteration that will follow this will not do anything.
3606     // (this is not a condition that holds when we set the region up,
3607     // as the region is not supposed to be empty in the first place)
3608     _finger = bottom;
3609   } else if (limit >= _region_limit) {
3610     assert(limit >= _finger, "peace of mind");
3611   } else {
3612     assert(limit < _region_limit, "only way to get here");
3613     // This can happen under some pretty unusual circumstances.  An
3614     // evacuation pause empties the region underneath our feet (NTAMS
3615     // at bottom). We then do some allocation in the region (NTAMS
3616     // stays at bottom), followed by the region being used as a GC
3617     // alloc region (NTAMS will move to top() and the objects
3618     // originally below it will be grayed). All objects now marked in
3619     // the region are explicitly grayed, if below the global finger,
3620     // and we in fact do not need to scan anything else. So, we simply
3621     // set _finger to be limit to ensure that the bitmap iteration
3622     // doesn't do anything.
3623     _finger = limit;
3624   }
3625 
3626   _region_limit = limit;
3627 }
3628 
3629 void CMTask::giveup_current_region() {
3630   assert(_curr_region != NULL, "invariant");
3631   if (_cm->verbose_low()) {
3632     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3633                            _worker_id, p2i(_curr_region));
3634   }
3635   clear_region_fields();
3636 }
3637 
3638 void CMTask::clear_region_fields() {
3639   // Reset these three fields to values indicating that we're not
3640   // holding on to a region.
3641   _curr_region   = NULL;
3642   _finger        = NULL;
3643   _region_limit  = NULL;
3644 }
3645 
3646 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3647   if (cm_oop_closure == NULL) {
3648     assert(_cm_oop_closure != NULL, "invariant");
3649   } else {
3650     assert(_cm_oop_closure == NULL, "invariant");
3651   }
3652   _cm_oop_closure = cm_oop_closure;
3653 }
3654 
3655 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3656   guarantee(nextMarkBitMap != NULL, "invariant");
3657 
3658   if (_cm->verbose_low()) {
3659     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3660   }
3661 
3662   _nextMarkBitMap                = nextMarkBitMap;
3663   clear_region_fields();
3664 
3665   _calls                         = 0;
3666   _elapsed_time_ms               = 0.0;
3667   _termination_time_ms           = 0.0;
3668   _termination_start_time_ms     = 0.0;
3669 
3670 #if _MARKING_STATS_
3671   _local_pushes                  = 0;
3672   _local_pops                    = 0;
3673   _local_max_size                = 0;
3674   _objs_scanned                  = 0;
3675   _global_pushes                 = 0;
3676   _global_pops                   = 0;
3677   _global_max_size               = 0;
3678   _global_transfers_to           = 0;
3679   _global_transfers_from         = 0;
3680   _regions_claimed               = 0;
3681   _objs_found_on_bitmap          = 0;
3682   _satb_buffers_processed        = 0;
3683   _steal_attempts                = 0;
3684   _steals                        = 0;
3685   _aborted                       = 0;
3686   _aborted_overflow              = 0;
3687   _aborted_cm_aborted            = 0;
3688   _aborted_yield                 = 0;
3689   _aborted_timed_out             = 0;
3690   _aborted_satb                  = 0;
3691   _aborted_termination           = 0;
3692 #endif // _MARKING_STATS_
3693 }
3694 
3695 bool CMTask::should_exit_termination() {
3696   regular_clock_call();
3697   // This is called when we are in the termination protocol. We should
3698   // quit if, for some reason, this task wants to abort or the global
3699   // stack is not empty (this means that we can get work from it).
3700   return !_cm->mark_stack_empty() || has_aborted();
3701 }
3702 
3703 void CMTask::reached_limit() {
3704   assert(_words_scanned >= _words_scanned_limit ||
3705          _refs_reached >= _refs_reached_limit ,
3706          "shouldn't have been called otherwise");
3707   regular_clock_call();
3708 }
3709 
3710 void CMTask::regular_clock_call() {
3711   if (has_aborted()) return;
3712 
3713   // First, we need to recalculate the words scanned and refs reached
3714   // limits for the next clock call.
3715   recalculate_limits();
3716 
3717   // During the regular clock call we do the following:
3718 
3719   // (1) If an overflow has been flagged, then we abort.
3720   if (_cm->has_overflown()) {
3721     set_has_aborted();
3722     return;
3723   }
3724 
3725   // If we are not concurrent (i.e. we're doing remark) we don't need
3726   // to check anything else. The other steps are only needed during
3727   // the concurrent marking phase.
3728   if (!concurrent()) return;
3729 
3730   // (2) If marking has been aborted for Full GC, then we also abort.
3731   if (_cm->has_aborted()) {
3732     set_has_aborted();
3733     statsOnly( ++_aborted_cm_aborted );
3734     return;
3735   }
3736 
3737   double curr_time_ms = os::elapsedVTime() * 1000.0;
3738 
3739   // (3) If marking stats are enabled, then we update the step history.
3740 #if _MARKING_STATS_
3741   if (_words_scanned >= _words_scanned_limit) {
3742     ++_clock_due_to_scanning;
3743   }
3744   if (_refs_reached >= _refs_reached_limit) {
3745     ++_clock_due_to_marking;
3746   }
3747 
3748   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3749   _interval_start_time_ms = curr_time_ms;
3750   _all_clock_intervals_ms.add(last_interval_ms);
3751 
3752   if (_cm->verbose_medium()) {
3753       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3754                         "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3755                         _worker_id, last_interval_ms,
3756                         _words_scanned,
3757                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3758                         _refs_reached,
3759                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3760   }
3761 #endif // _MARKING_STATS_
3762 
3763   // (4) We check whether we should yield. If we have to, then we abort.
3764   if (SuspendibleThreadSet::should_yield()) {
3765     // We should yield. To do this we abort the task. The caller is
3766     // responsible for yielding.
3767     set_has_aborted();
3768     statsOnly( ++_aborted_yield );
3769     return;
3770   }
3771 
3772   // (5) We check whether we've reached our time quota. If we have,
3773   // then we abort.
3774   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3775   if (elapsed_time_ms > _time_target_ms) {
3776     set_has_aborted();
3777     _has_timed_out = true;
3778     statsOnly( ++_aborted_timed_out );
3779     return;
3780   }
3781 
3782   // (6) Finally, we check whether there are enough completed SATB
3783   // buffers available for processing. If there are, we abort.
3784   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3785   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3786     if (_cm->verbose_low()) {
3787       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3788                              _worker_id);
3789     }
3790     // we do need to process SATB buffers; we'll abort and restart
3791     // the marking task to do so
3792     set_has_aborted();
3793     statsOnly( ++_aborted_satb );
3794     return;
3795   }
3796 }
3797 
3798 void CMTask::recalculate_limits() {
3799   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3800   _words_scanned_limit      = _real_words_scanned_limit;
3801 
3802   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
3803   _refs_reached_limit       = _real_refs_reached_limit;
3804 }
3805 
3806 void CMTask::decrease_limits() {
3807   // This is called when we believe that we're going to do an infrequent
3808   // operation which will increase the per-byte scanned cost (i.e. move
3809   // entries to/from the global stack). It basically tries to decrease the
3810   // scanning limit so that the clock is called earlier.
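  //
  // As a hedged worked example (the actual period constants are defined on
  // CMTask): if words_scanned_period were 12K words, the limit set below
  // would sit 9K words under the real limit, so the next clock call comes
  // after only a quarter of the usual scanning period.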
3811 
3812   if (_cm->verbose_medium()) {
3813     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3814   }
3815 
3816   _words_scanned_limit = _real_words_scanned_limit -
3817     3 * words_scanned_period / 4;
3818   _refs_reached_limit  = _real_refs_reached_limit -
3819     3 * refs_reached_period / 4;
3820 }
3821 
3822 void CMTask::move_entries_to_global_stack() {
3823   // local array where we'll store the entries that will be popped
3824   // from the local queue
3825   oop buffer[global_stack_transfer_size];
3826 
3827   int n = 0;
3828   oop obj;
3829   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3830     buffer[n] = obj;
3831     ++n;
3832   }
3833 
3834   if (n > 0) {
3835     // we popped at least one entry from the local queue
3836 
3837     statsOnly( ++_global_transfers_to; _local_pops += n );
3838 
3839     if (!_cm->mark_stack_push(buffer, n)) {
3840       if (_cm->verbose_low()) {
3841         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3842                                _worker_id);
3843       }
3844       set_has_aborted();
3845     } else {
3846       // the transfer was successful
3847 
3848       if (_cm->verbose_medium()) {
3849         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3850                                _worker_id, n);
3851       }
3852       statsOnly( int tmp_size = _cm->mark_stack_size();
3853                  if (tmp_size > _global_max_size) {
3854                    _global_max_size = tmp_size;
3855                  }
3856                  _global_pushes += n );
3857     }
3858   }
3859 
3860   // this operation was quite expensive, so decrease the limits
3861   decrease_limits();
3862 }
3863 
3864 void CMTask::get_entries_from_global_stack() {
3865   // local array where we'll store the entries that will be popped
3866   // from the global stack.
3867   oop buffer[global_stack_transfer_size];
3868   int n;
3869   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3870   assert(n <= global_stack_transfer_size,
3871          "we should not pop more than the given limit");
3872   if (n > 0) {
3873     // yes, we did actually pop at least one entry
3874 
3875     statsOnly( ++_global_transfers_from; _global_pops += n );
3876     if (_cm->verbose_medium()) {
3877       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3878                              _worker_id, n);
3879     }
3880     for (int i = 0; i < n; ++i) {
3881       bool success = _task_queue->push(buffer[i]);
3882       // We only call this when the local queue is empty or under a
3883       // given target limit. So, we do not expect this push to fail.
3884       assert(success, "invariant");
3885     }
3886 
3887     statsOnly( int tmp_size = _task_queue->size();
3888                if (tmp_size > _local_max_size) {
3889                  _local_max_size = tmp_size;
3890                }
3891                _local_pushes += n );
3892   }
3893 
3894   // this operation was quite expensive, so decrease the limits
3895   decrease_limits();
3896 }
3897 
3898 void CMTask::drain_local_queue(bool partially) {
3899   if (has_aborted()) return;
3900 
3901   // Decide what the target size is, depending on whether we're going to
3902   // drain it partially (so that other tasks can steal if they run out
3903   // of things to do) or totally (at the very end).
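  // For example (hedged: GCDrainStackTargetSize is a tunable whose default
  // is 64 entries), a partial drain of a queue holding 500 entries stops
  // once roughly 64 remain, leaving entries available for others to steal.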
3904   size_t target_size;
3905   if (partially) {
3906     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3907   } else {
3908     target_size = 0;
3909   }
3910 
3911   if (_task_queue->size() > target_size) {
3912     if (_cm->verbose_high()) {
3913       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3914                              _worker_id, target_size);
3915     }
3916 
3917     oop obj;
3918     bool ret = _task_queue->pop_local(obj);
3919     while (ret) {
3920       statsOnly( ++_local_pops );
3921 
3922       if (_cm->verbose_high()) {
3923         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3924                                p2i((void*) obj));
3925       }
3926 
3927       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3928       assert(!_g1h->is_on_master_free_list(
3929                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3930 
3931       scan_object(obj);
3932 
3933       if (_task_queue->size() <= target_size || has_aborted()) {
3934         ret = false;
3935       } else {
3936         ret = _task_queue->pop_local(obj);
3937       }
3938     }
3939 
3940     if (_cm->verbose_high()) {
3941       gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3942                              _worker_id, _task_queue->size());
3943     }
3944   }
3945 }
3946 
3947 void CMTask::drain_global_stack(bool partially) {
3948   if (has_aborted()) return;
3949 
3950   // We have a policy to drain the local queue before we attempt to
3951   // drain the global stack.
3952   assert(partially || _task_queue->size() == 0, "invariant");
3953 
3954   // Decide what the target size is, depending on whether we're going to
3955   // drain it partially (so that other tasks can steal if they run out
3956   // of things to do) or totally (at the very end).  Notice that,
3957   // because we move entries from the global stack in chunks or
3958   // because another task might be doing the same, we might in fact
3959   // drop below the target. But this is not a problem.
3960   size_t target_size;
3961   if (partially) {
3962     target_size = _cm->partial_mark_stack_size_target();
3963   } else {
3964     target_size = 0;
3965   }
3966 
3967   if (_cm->mark_stack_size() > target_size) {
3968     if (_cm->verbose_low()) {
3969       gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3970                              _worker_id, target_size);
3971     }
3972 
3973     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3974       get_entries_from_global_stack();
3975       drain_local_queue(partially);
3976     }
3977 
3978     if (_cm->verbose_low()) {
3979       gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3980                              _worker_id, _cm->mark_stack_size());
3981     }
3982   }
3983 }
3984 
3985 // The SATB queue code makes several assumptions about whether to call
3986 // the par or non-par versions of its methods. This is why some of the
3987 // code is replicated. We should really get rid of the single-threaded
3988 // version of the code to simplify things.
3989 void CMTask::drain_satb_buffers() {
3990   if (has_aborted()) return;
3991 
3992   // We set this so that the regular clock knows that we're in the
3993   // middle of draining buffers and doesn't set the abort flag when it
3994   // notices that SATB buffers are available for draining. It'd be
3995   // very counterproductive if it did that. :-)
3996   _draining_satb_buffers = true;
3997 
3998   CMSATBBufferClosure satb_cl(this, _g1h);
3999   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
4000 
4001   // This keeps claiming and applying the closure to completed buffers
4002   // until we run out of buffers or we need to abort.
4003   while (!has_aborted() &&
4004          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
4005     if (_cm->verbose_medium()) {
4006       gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
4007     }
4008     statsOnly( ++_satb_buffers_processed );
4009     regular_clock_call();
4010   }
4011 
4012   _draining_satb_buffers = false;
4013 
4014   assert(has_aborted() ||
4015          concurrent() ||
4016          satb_mq_set.completed_buffers_num() == 0, "invariant");
4017 
4018   // again, this was a potentially expensive operation, decrease the
4019   // limits to get the regular clock call early
4020   decrease_limits();
4021 }
4022 
4023 void CMTask::print_stats() {
4024   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
4025                          _worker_id, _calls);
4026   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
4027                          _elapsed_time_ms, _termination_time_ms);
4028   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
4029                          _step_times_ms.num(), _step_times_ms.avg(),
4030                          _step_times_ms.sd());
4031   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
4032                          _step_times_ms.maximum(), _step_times_ms.sum());
4033 
4034 #if _MARKING_STATS_
4035   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
4036                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
4037                          _all_clock_intervals_ms.sd());
4038   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
4039                          _all_clock_intervals_ms.maximum(),
4040                          _all_clock_intervals_ms.sum());
4041   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = %d, marking = %d",
4042                          _clock_due_to_scanning, _clock_due_to_marking);
4043   gclog_or_tty->print_cr("  Objects: scanned = %d, found on the bitmap = %d",
4044                          _objs_scanned, _objs_found_on_bitmap);
4045   gclog_or_tty->print_cr("  Local Queue:  pushes = %d, pops = %d, max size = %d",
4046                          _local_pushes, _local_pops, _local_max_size);
4047   gclog_or_tty->print_cr("  Global Stack: pushes = %d, pops = %d, max size = %d",
4048                          _global_pushes, _global_pops, _global_max_size);
4049   gclog_or_tty->print_cr("                transfers to = %d, transfers from = %d",
4050                          _global_transfers_to,_global_transfers_from);
4051   gclog_or_tty->print_cr("  Regions: claimed = %d", _regions_claimed);
4052   gclog_or_tty->print_cr("  SATB buffers: processed = %d", _satb_buffers_processed);
4053   gclog_or_tty->print_cr("  Steals: attempts = %d, successes = %d",
4054                          _steal_attempts, _steals);
4055   gclog_or_tty->print_cr("  Aborted: %d, due to", _aborted);
4056   gclog_or_tty->print_cr("    overflow: %d, global abort: %d, yield: %d",
4057                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
4058   gclog_or_tty->print_cr("    time out: %d, SATB: %d, termination: %d",
4059                          _aborted_timed_out, _aborted_satb, _aborted_termination);
4060 #endif // _MARKING_STATS_
4061 }
4062 
4063 /*****************************************************************************
4064 
4065     The do_marking_step(time_target_ms, ...) method is the building
4066     block of the parallel marking framework. It can be called in parallel
4067     with other invocations of do_marking_step() on different tasks
4068     (but only one per task, obviously) and concurrently with the
4069     mutator threads, or during remark, hence it eliminates the need
4070     for two versions of the code. When called during remark, it will
4071     pick up from where the task left off during the concurrent marking
4072     phase. Interestingly, tasks are also claimable during evacuation
4073     pauses, since do_marking_step() ensures that it aborts before
4074     it needs to yield.
4075 
4076     The data structures that it uses to do marking work are the
4077     following:
4078 
4079       (1) Marking Bitmap. If there are gray objects that appear only
4080       on the bitmap (this happens either when dealing with an overflow
4081       or when the initial marking phase has simply marked the roots
4082       and didn't push them on the stack), then tasks claim heap
4083       regions whose bitmap they then scan to find gray objects. A
4084       global finger indicates where the end of the last claimed region
4085       is. A local finger indicates how far into the region a task has
4086       scanned. The two fingers are used to determine how to gray an
4087       object (i.e. whether simply marking it is OK, as it will be
4088       visited by a task in the future, or whether it needs to also be
4089       pushed on a stack).
4090 
4091       (2) Local Queue. The local queue of the task which is accessed
4092       reasonably efficiently by the task. Other tasks can steal from
4093       it when they run out of work. Throughout the marking phase, a
4094       task attempts to keep its local queue short but not totally
4095       empty, so that entries are available for stealing by other
4096       tasks. Only when there is no more work will a task totally
4097       drain its local queue.
4098 
4099       (3) Global Mark Stack. This handles local queue overflow. During
4100       marking, only sets of entries are moved between it and the local
4101       queues, as access to it requires a mutex and more fine-grained
4102       interaction with it, which might cause contention. If it
4103       overflows, then the marking phase should restart and iterate
4104       over the bitmap to identify gray objects. Throughout the marking
4105       phase, tasks attempt to keep the global mark stack at a small
4106       length but not totally empty, so that entries are available for
4107       popping by other tasks. Only when there is no more work will
4108       tasks totally drain the global mark stack.
4109 
4110       (4) SATB Buffer Queue. This is where completed SATB buffers are
4111       made available. Buffers are regularly removed from this queue
4112       and scanned for roots, so that the queue doesn't get too
4113       long. During remark, all completed buffers are processed, as
4114       well as the filled-in parts of any uncompleted buffers.
4115 
4116     The do_marking_step() method tries to abort when the time target
4117     has been reached. There are a few other cases when the
4118     do_marking_step() method also aborts:
4119 
4120       (1) When the marking phase has been aborted (after a Full GC).
4121 
4122       (2) When a global overflow (on the global stack) has been
4123       triggered. Before the task aborts, it will actually sync up with
4124       the other tasks to ensure that all the marking data structures
4125       (local queues, stacks, fingers etc.)  are re-initialized so that
4126       when do_marking_step() completes, the marking phase can
4127       immediately restart.
4128 
4129       (3) When enough completed SATB buffers are available. The
4130       do_marking_step() method only tries to drain SATB buffers right
4131       at the beginning. So, if enough buffers are available, the
4132       marking step aborts and the SATB buffers are processed at
4133       the beginning of the next invocation.
4134 
4135       (4) To yield. When we have to yield, we abort and do the yield
4136       right at the end of do_marking_step(). This saves us from a lot
4137       of hassle as, by yielding we might allow a Full GC. If this
4138       happens then objects will be compacted underneath our feet, the
4139       heap might shrink, etc. We save checking for this by just
4140       aborting and doing the yield right at the end.
4141 
    From the above it follows that the do_marking_step() method should
    be called in a loop (or, otherwise, regularly) until it completes;
    a sketch of such a driver loop follows this comment block.

    If a marking step completes without its has_aborted() flag being
    set, the task has completed the current marking phase (and all
    other marking tasks have done so and have all synced up).
4148 
    A method called regular_clock_call() is invoked "regularly" (at
    sub-millisecond intervals) throughout marking. It is this clock
    method that checks all the abort conditions mentioned above and
    decides when the task should abort. A work-based scheme is used
    to trigger this clock method: it is called when the number of
    object words the marking step has scanned, or the number of
    references it has visited, reaches a given limit (see the sketch
    below). Additional invocations of the clock method have been
    planted in a few other strategic places too. The initial reason
    for the clock method was to avoid querying the virtual time (via
    os::elapsedVTime()) too often, as it is quite expensive. Once it
    was in place, it was natural to piggy-back all the other abort
    conditions on it too, rather than checking them constantly
    throughout the code.
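
    As a sketch, the work-based trigger amounts to the following
    (the _words_scanned_limit / _refs_reached_limit names mirror the
    _words_scanned and _refs_reached counters that do_marking_step()
    resets below; treat the exact shape as illustrative):

      // called from the scanning and reference-visiting hot paths
      if (_words_scanned >= _words_scanned_limit ||
          _refs_reached  >= _refs_reached_limit) {
        regular_clock_call();  // checks the time target + abort flags
      }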
4161 
4162     If do_termination is true then do_marking_step will enter its
4163     termination protocol.
4164 
4165     The value of is_serial must be true when do_marking_step is being
4166     called serially (i.e. by the VMThread) and do_marking_step should
4167     skip any synchronization in the termination and overflow code.
4168     Examples include the serial remark code and the serial reference
4169     processing closures.
4170 
4171     The value of is_serial must be false when do_marking_step is
4172     being called by any of the worker threads in a work gang.
4173     Examples include the concurrent marking code (CMMarkingTask),
4174     the MT remark code, and the MT reference processing closures.
4175 
4176  *****************************************************************************/
4177 
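// A minimal sketch of the driver loop described above (illustrative
// only -- actual callers, such as CMConcurrentMarkingTask, also do
// vtime accounting and yielding around this loop):
//
//   do {
//     task->do_marking_step(G1ConcMarkStepDurationMillis,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//   } while (task->has_aborted() && !cm->has_aborted());
//
// Serial callers (e.g. serial remark on the VMThread) instead pass a
// very large time target with is_serial == true, so that the step
// runs to completion in a single invocation.
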
4178 void CMTask::do_marking_step(double time_target_ms,
4179                              bool do_termination,
4180                              bool is_serial) {
4181   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4182   assert(concurrent() == _cm->concurrent(), "they should be the same");
4183 
4184   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4185   assert(_task_queues != NULL, "invariant");
4186   assert(_task_queue != NULL, "invariant");
4187   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4188 
4189   assert(!_claimed,
4190          "only one thread should claim this task at any one time");
4191 
  // OK, this doesn't safeguard against all possible scenarios, as it
  // is possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
4196   _claimed = true;
4197 
4198   _start_time_ms = os::elapsedVTime() * 1000.0;
4199   statsOnly( _interval_start_time_ms = _start_time_ms );
4200 
4201   // If do_stealing is true then do_marking_step will attempt to
4202   // steal work from the other CMTasks. It only makes sense to
4203   // enable stealing when the termination protocol is enabled
4204   // and do_marking_step() is not being called serially.
4205   bool do_stealing = do_termination && !is_serial;
4206 
4207   double diff_prediction_ms =
4208     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4209   _time_target_ms = time_target_ms - diff_prediction_ms;
4210 
4211   // set up the variables that are used in the work-based scheme to
4212   // call the regular clock method
4213   _words_scanned = 0;
4214   _refs_reached  = 0;
4215   recalculate_limits();
4216 
4217   // clear all flags
4218   clear_has_aborted();
4219   _has_timed_out = false;
4220   _draining_satb_buffers = false;
4221 
4222   ++_calls;
4223 
4224   if (_cm->verbose_low()) {
4225     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4226                            "target = %1.2lfms >>>>>>>>>>",
4227                            _worker_id, _calls, _time_target_ms);
4228   }
4229 
  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate them
  // locally, on this method's stack frame.
4233   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4234   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
4235   set_cm_oop_closure(&cm_oop_closure);
4236 
4237   if (_cm->has_overflown()) {
4238     // This can happen if the mark stack overflows during a GC pause
4239     // and this task, after a yield point, restarts. We have to abort
4240     // as we need to get into the overflow protocol which happens
4241     // right at the end of this task.
4242     set_has_aborted();
4243   }
4244 
4245   // First drain any available SATB buffers. After this, we will not
4246   // look at SATB buffers before the next invocation of this method.
4247   // If enough completed SATB buffers are queued up, the regular clock
4248   // will abort this task so that it restarts.
4249   drain_satb_buffers();
4250   // ...then partially drain the local queue and the global stack
4251   drain_local_queue(true);
4252   drain_global_stack(true);
4253 
4254   do {
4255     if (!has_aborted() && _curr_region != NULL) {
4256       // This means that we're already holding on to a region.
4257       assert(_finger != NULL, "if region is not NULL, then the finger "
4258              "should not be NULL either");
4259 
4260       // We might have restarted this task after an evacuation pause
4261       // which might have evacuated the region we're holding on to
4262       // underneath our feet. Let's read its limit again to make sure
4263       // that we do not iterate over a region of the heap that
4264       // contains garbage (update_region_limit() will also move
4265       // _finger to the start of the region if it is found empty).
4266       update_region_limit();
4267       // We will start from _finger not from the start of the region,
4268       // as we might be restarting this task after aborting half-way
4269       // through scanning this region. In this case, _finger points to
4270       // the address where we last found a marked object. If this is a
4271       // fresh region, _finger points to start().
4272       MemRegion mr = MemRegion(_finger, _region_limit);
4273 
4274       if (_cm->verbose_low()) {
4275         gclog_or_tty->print_cr("[%u] we're scanning part "
4276                                "["PTR_FORMAT", "PTR_FORMAT") "
4277                                "of region "HR_FORMAT,
4278                                _worker_id, p2i(_finger), p2i(_region_limit),
4279                                HR_FORMAT_PARAMS(_curr_region));
4280       }
4281 
4282       assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
4283              "humongous regions should go around loop once only");
4284 
4285       // Some special cases:
4286       // If the memory region is empty, we can just give up the region.
4287       // If the current region is humongous then we only need to check
4288       // the bitmap for the bit associated with the start of the object,
4289       // scan the object if it's live, and give up the region.
4290       // Otherwise, let's iterate over the bitmap of the part of the region
4291       // that is left.
4292       // If the iteration is successful, give up the region.
4293       if (mr.is_empty()) {
4294         giveup_current_region();
4295         regular_clock_call();
4296       } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
4297         if (_nextMarkBitMap->isMarked(mr.start())) {
4298           // The object is marked - apply the closure
4299           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4300           bitmap_closure.do_bit(offset);
4301         }
4302         // Even if this task aborted while scanning the humongous object
4303         // we can (and should) give up the current region.
4304         giveup_current_region();
4305         regular_clock_call();
4306       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4307         giveup_current_region();
4308         regular_clock_call();
4309       } else {
4310         assert(has_aborted(), "currently the only way to do so");
4311         // The only way to abort the bitmap iteration is to return
4312         // false from the do_bit() method. However, inside the
4313         // do_bit() method we move the _finger to point to the
4314         // object currently being looked at. So, if we bail out, we
4315         // have definitely set _finger to something non-null.
4316         assert(_finger != NULL, "invariant");
4317 
4318         // Region iteration was actually aborted. So now _finger
4319         // points to the address of the object we last scanned. If we
4320         // leave it there, when we restart this task, we will rescan
4321         // the object. It is easy to avoid this. We move the finger by
4322         // enough to point to the next possible object header (the
4323         // bitmap knows by how much we need to move it as it knows its
4324         // granularity).
4325         assert(_finger < _region_limit, "invariant");
4326         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
4327         // Check if bitmap iteration was aborted while scanning the last object
4328         if (new_finger >= _region_limit) {
4329           giveup_current_region();
4330         } else {
4331           move_finger_to(new_finger);
4332         }
4333       }
4334     }
4335     // At this point we have either completed iterating over the
4336     // region we were holding on to, or we have aborted.
4337 
4338     // We then partially drain the local queue and the global stack.
4339     // (Do we really need this?)
4340     drain_local_queue(true);
4341     drain_global_stack(true);
4342 
4343     // Read the note on the claim_region() method on why it might
4344     // return NULL with potentially more regions available for
4345     // claiming and why we have to check out_of_regions() to determine
4346     // whether we're done or not.
4347     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4348       // We are going to try to claim a new region. We should have
4349       // given up on the previous one.
4350       // Separated the asserts so that we know which one fires.
4351       assert(_curr_region  == NULL, "invariant");
4352       assert(_finger       == NULL, "invariant");
4353       assert(_region_limit == NULL, "invariant");
4354       if (_cm->verbose_low()) {
4355         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4356       }
4357       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4358       if (claimed_region != NULL) {
4359         // Yes, we managed to claim one
4360         statsOnly( ++_regions_claimed );
4361 
4362         if (_cm->verbose_low()) {
4363           gclog_or_tty->print_cr("[%u] we successfully claimed "
4364                                  "region "PTR_FORMAT,
4365                                  _worker_id, p2i(claimed_region));
4366         }
4367 
4368         setup_for_region(claimed_region);
4369         assert(_curr_region == claimed_region, "invariant");
4370       }
4371       // It is important to call the regular clock here. It might take
4372       // a while to claim a region if, for example, we hit a large
4373       // block of empty regions. So we need to call the regular clock
4374       // method once round the loop to make sure it's called
4375       // frequently enough.
4376       regular_clock_call();
4377     }
4378 
4379     if (!has_aborted() && _curr_region == NULL) {
4380       assert(_cm->out_of_regions(),
4381              "at this point we should be out of regions");
4382     }
4383   } while ( _curr_region != NULL && !has_aborted());
4384 
4385   if (!has_aborted()) {
4386     // We cannot check whether the global stack is empty, since other
4387     // tasks might be pushing objects to it concurrently.
4388     assert(_cm->out_of_regions(),
4389            "at this point we should be out of regions");
4390 
4391     if (_cm->verbose_low()) {
4392       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4393     }
4394 
4395     // Try to reduce the number of available SATB buffers so that
4396     // remark has less work to do.
4397     drain_satb_buffers();
4398   }
4399 
4400   // Since we've done everything else, we can now totally drain the
4401   // local queue and global stack.
4402   drain_local_queue(false);
4403   drain_global_stack(false);
4404 
  // Attempt to steal work from other tasks' queues.
4406   if (do_stealing && !has_aborted()) {
4407     // We have not aborted. This means that we have finished all that
4408     // we could. Let's try to do some stealing...
4409 
4410     // We cannot check whether the global stack is empty, since other
4411     // tasks might be pushing objects to it concurrently.
4412     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4413            "only way to reach here");
4414 
4415     if (_cm->verbose_low()) {
4416       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4417     }
4418 
4419     while (!has_aborted()) {
4420       oop obj;
4421       statsOnly( ++_steal_attempts );
4422 
4423       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4424         if (_cm->verbose_medium()) {
4425           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4426                                  _worker_id, p2i((void*) obj));
4427         }
4428 
4429         statsOnly( ++_steals );
4430 
4431         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4432                "any stolen object should be marked");
4433         scan_object(obj);
4434 
4435         // And since we're towards the end, let's totally drain the
4436         // local queue and global stack.
4437         drain_local_queue(false);
4438         drain_global_stack(false);
4439       } else {
4440         break;
4441       }
4442     }
4443   }
4444 
4445   // If we are about to wrap up and go into termination, check if we
4446   // should raise the overflow flag.
4447   if (do_termination && !has_aborted()) {
4448     if (_cm->force_overflow()->should_force()) {
4449       _cm->set_has_overflown();
4450       regular_clock_call();
4451     }
4452   }
4453 
4454   // We still haven't aborted. Now, let's try to get into the
4455   // termination protocol.
4456   if (do_termination && !has_aborted()) {
4457     // We cannot check whether the global stack is empty, since other
4458     // tasks might be concurrently pushing objects on it.
4459     // Separated the asserts so that we know which one fires.
4460     assert(_cm->out_of_regions(), "only way to reach here");
4461     assert(_task_queue->size() == 0, "only way to reach here");
4462 
4463     if (_cm->verbose_low()) {
4464       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4465     }
4466 
4467     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4468 
    // The CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether or not to exit the termination protocol.
4472     bool finished = (is_serial ||
4473                      _cm->terminator()->offer_termination(this));
4474     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4475     _termination_time_ms +=
4476       termination_end_time_ms - _termination_start_time_ms;
4477 
4478     if (finished) {
4479       // We're all done.
4480 
4481       if (_worker_id == 0) {
4482         // let's allow task 0 to do this
4483         if (concurrent()) {
4484           assert(_cm->concurrent_marking_in_progress(), "invariant");
4485           // we need to set this to false before the next
4486           // safepoint. This way we ensure that the marking phase
4487           // doesn't observe any more heap expansions.
4488           _cm->clear_concurrent_marking_in_progress();
4489         }
4490       }
4491 
4492       // We can now guarantee that the global stack is empty, since
4493       // all other tasks have finished. We separated the guarantees so
4494       // that, if a condition is false, we can immediately find out
4495       // which one.
4496       guarantee(_cm->out_of_regions(), "only way to reach here");
4497       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4498       guarantee(_task_queue->size() == 0, "only way to reach here");
4499       guarantee(!_cm->has_overflown(), "only way to reach here");
4500       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4501 
4502       if (_cm->verbose_low()) {
4503         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4504       }
4505     } else {
      // Apparently there's more work to do. Let's abort this task. The
      // caller will restart it and we can hopefully find more work to do.
4508 
4509       if (_cm->verbose_low()) {
4510         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4511                                _worker_id);
4512       }
4513 
4514       set_has_aborted();
4515       statsOnly( ++_aborted_termination );
4516     }
4517   }
4518 
  // Mainly for debugging purposes, to make sure that a pointer to the
  // closure which was allocated in this frame doesn't escape it by
  // accident.
4522   set_cm_oop_closure(NULL);
4523   double end_time_ms = os::elapsedVTime() * 1000.0;
4524   double elapsed_time_ms = end_time_ms - _start_time_ms;
4525   // Update the step history.
4526   _step_times_ms.add(elapsed_time_ms);
4527 
4528   if (has_aborted()) {
4529     // The task was aborted for some reason.
4530 
4531     statsOnly( ++_aborted );
4532 
4533     if (_has_timed_out) {
4534       double diff_ms = elapsed_time_ms - _time_target_ms;
4535       // Keep statistics of how well we did with respect to hitting
4536       // our target only if we actually timed out (if we aborted for
4537       // other reasons, then the results might get skewed).
4538       _marking_step_diffs_ms.add(diff_ms);
4539     }
4540 
4541     if (_cm->has_overflown()) {
4542       // This is the interesting one. We aborted because a global
4543       // overflow was raised. This means we have to restart the
4544       // marking phase and start iterating over regions. However, in
4545       // order to do this we have to make sure that all tasks stop
4546       // what they are doing and re-initialise in a safe manner. We
4547       // will achieve this with the use of two barrier sync points.
4548 
4549       if (_cm->verbose_low()) {
4550         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4551       }
4552 
4553       if (!is_serial) {
4554         // We only need to enter the sync barrier if being called
4555         // from a parallel context
4556         _cm->enter_first_sync_barrier(_worker_id);
4557 
4558         // When we exit this sync barrier we know that all tasks have
4559         // stopped doing marking work. So, it's now safe to
4560         // re-initialise our data structures. At the end of this method,
4561         // task 0 will clear the global data structures.
4562       }
4563 
4564       statsOnly( ++_aborted_overflow );
4565 
4566       // We clear the local state of this task...
4567       clear_region_fields();
4568 
4569       if (!is_serial) {
4570         // ...and enter the second barrier.
4571         _cm->enter_second_sync_barrier(_worker_id);
4572       }
      // At this point, if we are in the concurrent phase of marking,
      // everything has been re-initialized and we are ready to
      // restart.
4576     }
4577 
4578     if (_cm->verbose_low()) {
4579       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4580                              "elapsed = %1.2lfms <<<<<<<<<<",
4581                              _worker_id, _time_target_ms, elapsed_time_ms);
4582       if (_cm->has_aborted()) {
4583         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4584                                _worker_id);
4585       }
4586     }
4587   } else {
4588     if (_cm->verbose_low()) {
4589       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4590                              "elapsed = %1.2lfms <<<<<<<<<<",
4591                              _worker_id, _time_target_ms, elapsed_time_ms);
4592     }
4593   }
4594 
4595   _claimed = false;
4596 }
4597 
4598 CMTask::CMTask(uint worker_id,
4599                ConcurrentMark* cm,
4600                size_t* marked_bytes,
4601                BitMap* card_bm,
4602                CMTaskQueue* task_queue,
4603                CMTaskQueueSet* task_queues)
4604   : _g1h(G1CollectedHeap::heap()),
4605     _worker_id(worker_id), _cm(cm),
4606     _claimed(false),
4607     _nextMarkBitMap(NULL), _hash_seed(17),
4608     _task_queue(task_queue),
4609     _task_queues(task_queues),
4610     _cm_oop_closure(NULL),
4611     _marked_bytes_array(marked_bytes),
4612     _card_bm(card_bm) {
4613   guarantee(task_queue != NULL, "invariant");
4614   guarantee(task_queues != NULL, "invariant");
4615 
4616   statsOnly( _clock_due_to_scanning = 0;
4617              _clock_due_to_marking  = 0 );
4618 
4619   _marking_step_diffs_ms.add(0.5);
4620 }
4621 
4622 // These are formatting macros that are used below to ensure
4623 // consistent formatting. The *_H_* versions are used to format the
4624 // header for a particular value and they should be kept consistent
4625 // with the corresponding macro. Also note that most of the macros add
4626 // the necessary white space (as a prefix) which makes them a bit
4627 // easier to compose.
4628 
4629 // All the output lines are prefixed with this string to be able to
4630 // identify them easily in a large log file.
4631 #define G1PPRL_LINE_PREFIX            "###"
4632 
4633 #define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
4634 #ifdef _LP64
4635 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
4636 #else // _LP64
4637 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
4638 #endif // _LP64
4639 
4640 // For per-region info
4641 #define G1PPRL_TYPE_FORMAT            "   %-4s"
4642 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
4643 #define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
4644 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
4645 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
4646 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
4647 
4648 // For summary info
4649 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
4650 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
4651 #define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
4652 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
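
// For illustration, with the macros above a per-region line comes out
// roughly like this (hypothetical values and region type shown; the
// exact columns are defined by the print_cr() calls below):
//
//   ###   OLD 0x00000000f0000000-0x00000000f0100000    1048576 ...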
4653 
4654 G1PrintRegionLivenessInfoClosure::
4655 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4656   : _out(out),
4657     _total_used_bytes(0), _total_capacity_bytes(0),
4658     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4659     _hum_used_bytes(0), _hum_capacity_bytes(0),
4660     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4661     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4662   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4663   MemRegion g1_reserved = g1h->g1_reserved();
4664   double now = os::elapsedTime();
4665 
4666   // Print the header of the output.
4667   _out->cr();
4668   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4669   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4670                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4671                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4672                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4673                  HeapRegion::GrainBytes);
4674   _out->print_cr(G1PPRL_LINE_PREFIX);
4675   _out->print_cr(G1PPRL_LINE_PREFIX
4676                 G1PPRL_TYPE_H_FORMAT
4677                 G1PPRL_ADDR_BASE_H_FORMAT
4678                 G1PPRL_BYTE_H_FORMAT
4679                 G1PPRL_BYTE_H_FORMAT
4680                 G1PPRL_BYTE_H_FORMAT
4681                 G1PPRL_DOUBLE_H_FORMAT
4682                 G1PPRL_BYTE_H_FORMAT
4683                 G1PPRL_BYTE_H_FORMAT,
4684                 "type", "address-range",
4685                 "used", "prev-live", "next-live", "gc-eff",
4686                 "remset", "code-roots");
4687   _out->print_cr(G1PPRL_LINE_PREFIX
4688                 G1PPRL_TYPE_H_FORMAT
4689                 G1PPRL_ADDR_BASE_H_FORMAT
4690                 G1PPRL_BYTE_H_FORMAT
4691                 G1PPRL_BYTE_H_FORMAT
4692                 G1PPRL_BYTE_H_FORMAT
4693                 G1PPRL_DOUBLE_H_FORMAT
4694                 G1PPRL_BYTE_H_FORMAT
4695                 G1PPRL_BYTE_H_FORMAT,
4696                 "", "",
4697                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4698                 "(bytes)", "(bytes)");
4699 }
4700 
// It takes as a parameter a pointer to one of the _hum_* fields,
// deduces the corresponding value for a region in a humongous region
// series (either the region size, or what's left if the _hum_* field
// is < the region size), and updates the _hum_* field accordingly.
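// For illustration (assuming, hypothetically, a region size of 1M and
// a humongous series whose total used size is 2.5M): starting from
// *hum_bytes == 2.5M, three successive calls return 1M, 1M and 0.5M,
// leaving *hum_bytes == 0 for any further "continues humongous"
// regions.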
4705 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4706   size_t bytes = 0;
4707   // The > 0 check is to deal with the prev and next live bytes which
4708   // could be 0.
4709   if (*hum_bytes > 0) {
4710     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4711     *hum_bytes -= bytes;
4712   }
4713   return bytes;
4714 }
4715 
// It deduces the values for a region in a humongous region series
// from the _hum_* fields and updates those accordingly. It assumes
// that the _hum_* fields have already been set up from the "starts
// humongous" region and that we visit the regions in address order.
4720 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4721                                                      size_t* capacity_bytes,
4722                                                      size_t* prev_live_bytes,
4723                                                      size_t* next_live_bytes) {
4724   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4725   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
4726   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
4727   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4728   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4729 }
4730 
4731 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4732   const char* type       = r->get_type_str();
4733   HeapWord* bottom       = r->bottom();
4734   HeapWord* end          = r->end();
4735   size_t capacity_bytes  = r->capacity();
4736   size_t used_bytes      = r->used();
4737   size_t prev_live_bytes = r->live_bytes();
4738   size_t next_live_bytes = r->next_live_bytes();
4739   double gc_eff          = r->gc_efficiency();
4740   size_t remset_bytes    = r->rem_set()->mem_size();
4741   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4742 
4743   if (r->startsHumongous()) {
4744     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4745            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4746            "they should have been zeroed after the last time we used them");
4747     // Set up the _hum_* fields.
4748     _hum_capacity_bytes  = capacity_bytes;
4749     _hum_used_bytes      = used_bytes;
4750     _hum_prev_live_bytes = prev_live_bytes;
4751     _hum_next_live_bytes = next_live_bytes;
4752     get_hum_bytes(&used_bytes, &capacity_bytes,
4753                   &prev_live_bytes, &next_live_bytes);
4754     end = bottom + HeapRegion::GrainWords;
4755   } else if (r->continuesHumongous()) {
4756     get_hum_bytes(&used_bytes, &capacity_bytes,
4757                   &prev_live_bytes, &next_live_bytes);
4758     assert(end == bottom + HeapRegion::GrainWords, "invariant");
4759   }
4760 
4761   _total_used_bytes      += used_bytes;
4762   _total_capacity_bytes  += capacity_bytes;
4763   _total_prev_live_bytes += prev_live_bytes;
4764   _total_next_live_bytes += next_live_bytes;
4765   _total_remset_bytes    += remset_bytes;
4766   _total_strong_code_roots_bytes += strong_code_roots_bytes;
4767 
4768   // Print a line for this particular region.
4769   _out->print_cr(G1PPRL_LINE_PREFIX
4770                  G1PPRL_TYPE_FORMAT
4771                  G1PPRL_ADDR_BASE_FORMAT
4772                  G1PPRL_BYTE_FORMAT
4773                  G1PPRL_BYTE_FORMAT
4774                  G1PPRL_BYTE_FORMAT
4775                  G1PPRL_DOUBLE_FORMAT
4776                  G1PPRL_BYTE_FORMAT
4777                  G1PPRL_BYTE_FORMAT,
4778                  type, p2i(bottom), p2i(end),
4779                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4780                  remset_bytes, strong_code_roots_bytes);
4781 
4782   return false;
4783 }
4784 
4785 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // Add the static memory usage to the remembered set sizes.
4787   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4788   // Print the footer of the output.
4789   _out->print_cr(G1PPRL_LINE_PREFIX);
4790   _out->print_cr(G1PPRL_LINE_PREFIX
4791                  " SUMMARY"
4792                  G1PPRL_SUM_MB_FORMAT("capacity")
4793                  G1PPRL_SUM_MB_PERC_FORMAT("used")
4794                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4795                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4796                  G1PPRL_SUM_MB_FORMAT("remset")
4797                  G1PPRL_SUM_MB_FORMAT("code-roots"),
4798                  bytes_to_mb(_total_capacity_bytes),
4799                  bytes_to_mb(_total_used_bytes),
4800                  perc(_total_used_bytes, _total_capacity_bytes),
4801                  bytes_to_mb(_total_prev_live_bytes),
4802                  perc(_total_prev_live_bytes, _total_capacity_bytes),
4803                  bytes_to_mb(_total_next_live_bytes),
4804                  perc(_total_next_live_bytes, _total_capacity_bytes),
4805                  bytes_to_mb(_total_remset_bytes),
4806                  bytes_to_mb(_total_strong_code_roots_bytes));
4807   _out->cr();
4808 }