/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1SharedDirtyCardQueue.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.inline.hpp"
#include "gc/g1/sparsePRT.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/ticks.hpp"

// Collects information about the overall heap root scan progress during an evacuation.
//
// Scanning the remembered sets works by first merging all sources of cards to be
// scanned (log buffers, hot card cache, remembered sets) into a single data structure
// to remove duplicates and simplify work distribution.
//
// During the following card scanning we not only scan this combined set of cards, but
// also remember that these were completely scanned. These marks need to be preserved
// across evacuation passes (increments) so that later passes do not scan these cards
// again.
//
// The representation for all the cards to scan is the card table: cards can have
// one of three states during GC:
// - clean: these cards will not be scanned in this pass
// - dirty: these cards will be scanned in this pass
// - scanned: these cards have already been scanned in a previous pass
//
// After all evacuation is done, we reset the card table to clean.
//
// Work distribution occurs on a "chunk" basis, i.e. contiguous ranges of cards. As an
// additional optimization, during card merging we remember which regions and which
// chunks actually contain cards to be scanned. Threads iterate only across these
// regions, and only compete for chunks containing any cards.
//
// Within these chunks, a worker scans the card table on "blocks" of cards, i.e.
// contiguous ranges of dirty cards to be scanned. These blocks are converted to actual
// memory ranges and then passed on for scanning.
class G1RemSetScanState : public CHeapObj<mtGC> {
  class G1DirtyRegions;

  size_t _max_regions;

  // Has this region, which is part of the collection set, been processed yet.
  typedef bool G1RemsetIterState;

  G1RemsetIterState volatile* _collection_set_iter_state;

  // Card table iteration claim for each heap region, from 0 (completely unscanned)
  // to (>=) HeapRegion::CardsPerRegion (completely scanned).
  uint volatile* _card_table_scan_state;

  // Returns the "optimal" number of chunks per region to use for claiming areas within
  // a region for scanning. Using the region size as a proxy for the heap size, we limit
  // the total number of chunks to balance memory usage and maintenance effort of that
  // table against the granularity of distributing scanning work.
  // Testing showed that 8 chunks for 1M/2M regions, 16 for 4M/8M regions and 32 for
  // 16M/32M regions is a good trade-off.
  static uint get_chunks_per_region(uint log_region_size) {
    // Limit the expected input values to current known possible values of the
    // (log) region size. Adjust as necessary after testing if changing the permissible
    // values for region size.
    assert(log_region_size >= 20 && log_region_size <= 25,
           "expected value in [20,25], but got %u", log_region_size);
    return 1u << (log_region_size / 2 - 7);
  }

  uint _scan_chunks_per_region;         // Number of chunks per region.
  uint8_t _log_scan_chunks_per_region;  // Log of number of chunks per region.
  bool* _region_scan_chunks;
  size_t _num_total_scan_chunks;        // Total number of elements in _region_scan_chunks.
  uint8_t _scan_chunks_shift;           // For conversion between card index and chunk index.
public:
  uint scan_chunk_size() const { return (uint)1 << _scan_chunks_shift; }

  // Returns whether the chunk corresponding to the given region/card-in-region contains a
  // dirty card, i.e. actually needs scanning.
  bool chunk_needs_scan(uint const region_idx, uint const card_in_region) const {
    size_t const idx = ((size_t)region_idx << _log_scan_chunks_per_region) + (card_in_region >> _scan_chunks_shift);
    assert(idx < _num_total_scan_chunks, "Index " SIZE_FORMAT " out of bounds " SIZE_FORMAT,
           idx, _num_total_scan_chunks);
    return _region_scan_chunks[idx];
  }

private:
  // The complete set of regions whose card table needs to be cleared at the end of GC
  // because we scribbled all over them.
  G1DirtyRegions* _all_dirty_regions;
  // The set of regions whose card table needs to be scanned for new dirty cards
  // in the current evacuation pass.
  G1DirtyRegions* _next_dirty_regions;

  // Set of (unique) regions that can be added to concurrently.
  class G1DirtyRegions : public CHeapObj<mtGC> {
    uint* _buffer;
    uint _cur_idx;
    size_t _max_regions;

    bool* _contains;

  public:
    G1DirtyRegions(size_t max_regions) :
      _buffer(NEW_C_HEAP_ARRAY(uint, max_regions, mtGC)),
      _cur_idx(0),
      _max_regions(max_regions),
      _contains(NEW_C_HEAP_ARRAY(bool, max_regions, mtGC)) {

      reset();
    }

    static size_t chunk_size() { return M; }

    ~G1DirtyRegions() {
      FREE_C_HEAP_ARRAY(uint, _buffer);
      FREE_C_HEAP_ARRAY(bool, _contains);
    }

    void reset() {
      _cur_idx = 0;
      ::memset(_contains, false, _max_regions * sizeof(bool));
    }

    uint size() const { return _cur_idx; }

    uint at(uint idx) const {
      assert(idx < _cur_idx, "Index %u beyond valid regions", idx);
      return _buffer[idx];
    }

    void add_dirty_region(uint region) {
      if (_contains[region]) {
        return;
      }

      bool marked_as_dirty = Atomic::cmpxchg(true, &_contains[region], false) == false;
      if (marked_as_dirty) {
        uint allocated = Atomic::add(1u, &_cur_idx) - 1;
        _buffer[allocated] = region;
      }
    }

    // Creates the union of this and the other G1DirtyRegions.
    void merge(const G1DirtyRegions* other) {
      for (uint i = 0; i < other->size(); i++) {
        uint region = other->at(i);
        if (!_contains[region]) {
          _buffer[_cur_idx++] = region;
          _contains[region] = true;
        }
      }
    }
  };

  // Creates a snapshot of the current _top values at the start of collection to
  // filter out card marks that we do not want to scan.
  class G1ResetScanTopClosure : public HeapRegionClosure {
    G1RemSetScanState* _scan_state;

  public:
    G1ResetScanTopClosure(G1RemSetScanState* scan_state) : _scan_state(scan_state) { }

    virtual bool do_heap_region(HeapRegion* r) {
      uint hrm_index = r->hrm_index();
      if (r->in_collection_set()) {
        // Young regions had their card table marked as young at their allocation;
        // we need to make sure that these marks are cleared at the end of GC, *but*
        // they should not be scanned for cards.
        // So directly add them to the "all_dirty_regions".
        // Same for regions in the (initial) collection set: they may contain cards from
        // the log buffers, make sure they are cleaned.
        _scan_state->add_all_dirty_region(hrm_index);
      } else if (r->is_old_or_humongous_or_archive()) {
        _scan_state->set_scan_top(hrm_index, r->top());
      }
      return false;
    }
  };
  // For each region, contains the maximum top() value to be used during this garbage
  // collection. Subsumes common checks like filtering out everything but old and
  // humongous regions outside the collection set.
  // This is valid because we are not interested in scanning stray remembered set
  // entries from free or archive regions.
  HeapWord** _scan_top;

  class G1ClearCardTableTask : public AbstractGangTask {
    G1CollectedHeap* _g1h;
    G1DirtyRegions* _regions;
    uint _chunk_length;

    uint volatile _cur_dirty_regions;

    G1RemSetScanState* _scan_state;

  public:
    G1ClearCardTableTask(G1CollectedHeap* g1h,
                         G1DirtyRegions* regions,
                         uint chunk_length,
                         G1RemSetScanState* scan_state) :
      AbstractGangTask("G1 Clear Card Table Task"),
      _g1h(g1h),
      _regions(regions),
      _chunk_length(chunk_length),
      _cur_dirty_regions(0),
      _scan_state(scan_state) {

      assert(chunk_length > 0, "must be");
    }

    static uint chunk_size() { return M; }

    void work(uint worker_id) {
      while (_cur_dirty_regions < _regions->size()) {
        uint next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
        uint max = MIN2(next + _chunk_length, _regions->size());

        for (uint i = next; i < max; i++) {
          HeapRegion* r = _g1h->region_at(_regions->at(i));
          if (!r->is_survivor()) {
            r->clear_cardtable();
          }
        }
      }
    }
  };

  // Clear the card table of "dirty" regions.
  void clear_card_table(WorkGang* workers) {
    uint num_regions = _all_dirty_regions->size();

    if (num_regions == 0) {
      return;
    }

    uint const num_chunks = (uint)(align_up((size_t)num_regions << HeapRegion::LogCardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size());
    uint const num_workers = MIN2(num_chunks, workers->active_workers());
    uint const chunk_length = G1ClearCardTableTask::chunk_size() / (uint)HeapRegion::CardsPerRegion;

    // Iterate over the dirty cards region list.
    G1ClearCardTableTask cl(G1CollectedHeap::heap(), _all_dirty_regions, chunk_length, this);

    log_debug(gc, ergo)("Running %s using %u workers for %u "
                        "units of work for %u regions.",
                        cl.name(), num_workers, num_chunks, num_regions);
    workers->run_task(&cl, num_workers);

#ifndef PRODUCT
    G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
#endif
  }

public:
  G1RemSetScanState() :
    _max_regions(0),
    _collection_set_iter_state(NULL),
    _card_table_scan_state(NULL),
    _scan_chunks_per_region(get_chunks_per_region(HeapRegion::LogOfHRGrainBytes)),
    _log_scan_chunks_per_region(log2_uint(_scan_chunks_per_region)),
    _region_scan_chunks(NULL),
    _num_total_scan_chunks(0),
    _scan_chunks_shift(0),
    _all_dirty_regions(NULL),
    _next_dirty_regions(NULL),
    _scan_top(NULL) {
  }

  ~G1RemSetScanState() {
    FREE_C_HEAP_ARRAY(G1RemsetIterState, _collection_set_iter_state);
    FREE_C_HEAP_ARRAY(uint, _card_table_scan_state);
    FREE_C_HEAP_ARRAY(bool, _region_scan_chunks);
    FREE_C_HEAP_ARRAY(HeapWord*, _scan_top);
  }

  void initialize(size_t max_regions) {
    assert(_collection_set_iter_state == NULL, "Must not be initialized twice");
    _max_regions = max_regions;
    _collection_set_iter_state = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
    _card_table_scan_state = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
    _num_total_scan_chunks = max_regions * _scan_chunks_per_region;
    _region_scan_chunks = NEW_C_HEAP_ARRAY(bool, _num_total_scan_chunks, mtGC);

    _scan_chunks_shift = (uint8_t)log2_intptr(HeapRegion::CardsPerRegion / _scan_chunks_per_region);
    _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC);
  }

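  // Prepare for a new garbage collection: reset the per-region claim state and scan_top
  // values, allocate fresh dirty region sets and snapshot the current top values of the
  // regions we may scan.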
  void prepare() {
    for (size_t i = 0; i < _max_regions; i++) {
      _collection_set_iter_state[i] = false;
      clear_scan_top((uint)i);
    }

    _all_dirty_regions = new G1DirtyRegions(_max_regions);
    _next_dirty_regions = new G1DirtyRegions(_max_regions);

    G1ResetScanTopClosure cl(this);
    G1CollectedHeap::heap()->heap_region_iterate(&cl);
  }

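  // Prepare for merging heap roots for the next collection set increment: roll the
  // regions dirtied so far into the set of all dirty regions, and reset the per-region
  // card claims and the chunk table.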
  void prepare_for_merge_heap_roots() {
    _all_dirty_regions->merge(_next_dirty_regions);

    _next_dirty_regions->reset();
    for (size_t i = 0; i < _max_regions; i++) {
      _card_table_scan_state[i] = 0;
    }

    ::memset(_region_scan_chunks, false, _num_total_scan_chunks * sizeof(*_region_scan_chunks));
  }

  // Returns whether the given region contains cards we need to scan. The remembered
  // set and other sources may contain cards that
  // - are in uncommitted regions
  // - are located in the collection set
  // - are located in free regions
  // as we do not clean up remembered sets before merging heap roots.
  bool contains_cards_to_process(uint const region_idx) const {
    HeapRegion* hr = G1CollectedHeap::heap()->region_at_or_null(region_idx);
    return (hr != NULL && !hr->in_collection_set() && hr->is_old_or_humongous_or_archive());
  }

  size_t num_visited_cards() const {
    size_t result = 0;
    for (uint i = 0; i < _num_total_scan_chunks; i++) {
      if (_region_scan_chunks[i]) {
        result++;
      }
    }
    return result * (HeapRegion::CardsPerRegion / _scan_chunks_per_region);
  }

  size_t num_cards_in_dirty_regions() const {
    return _next_dirty_regions->size() * HeapRegion::CardsPerRegion;
  }

  void set_chunk_region_dirty(size_t const region_card_idx) {
    size_t chunk_idx = region_card_idx >> _scan_chunks_shift;
    for (uint i = 0; i < _scan_chunks_per_region; i++) {
      _region_scan_chunks[chunk_idx++] = true;
    }
  }

  void set_chunk_dirty(size_t const card_idx) {
    assert((card_idx >> _scan_chunks_shift) < _num_total_scan_chunks,
           "Trying to access index " SIZE_FORMAT " out of bounds " SIZE_FORMAT,
           card_idx >> _scan_chunks_shift, _num_total_scan_chunks);
    size_t const chunk_idx = card_idx >> _scan_chunks_shift;
    if (!_region_scan_chunks[chunk_idx]) {
      _region_scan_chunks[chunk_idx] = true;
    }
  }

  void cleanup(WorkGang* workers) {
    _all_dirty_regions->merge(_next_dirty_regions);

    clear_card_table(workers);

    delete _all_dirty_regions;
    _all_dirty_regions = NULL;

    delete _next_dirty_regions;
    _next_dirty_regions = NULL;
  }

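  // Apply the given closure to all regions that need to be scanned for new dirty cards.
  // Each worker starts at a different offset, derived from its worker id, to spread out
  // contention on the regions.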
  void iterate_dirty_regions_from(HeapRegionClosure* cl, uint worker_id) {
    uint num_regions = _next_dirty_regions->size();

    if (num_regions == 0) {
      return;
    }

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    WorkGang* workers = g1h->workers();
    uint const max_workers = workers->active_workers();

    uint const start_pos = num_regions * worker_id / max_workers;
    uint cur = start_pos;

    do {
      bool result = cl->do_heap_region(g1h->region_at(_next_dirty_regions->at(cur)));
      guarantee(!result, "Not allowed to ask for early termination.");
      cur++;
      if (cur == _next_dirty_regions->size()) {
        cur = 0;
      }
    } while (cur != start_pos);
  }

  // Attempt to claim the given region in the collection set for iteration. Returns true
  // if this call caused the transition from Unclaimed to Claimed.
  inline bool claim_collection_set_region(uint region) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    if (_collection_set_iter_state[region]) {
      return false;
    }
    return !Atomic::cmpxchg(true, &_collection_set_iter_state[region], false);
  }

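  // Returns whether there are still unclaimed cards in the given region's card table.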
  bool has_cards_to_scan(uint region) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _card_table_scan_state[region] < HeapRegion::CardsPerRegion;
  }

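  // Atomically claim the next "increment" cards of the given region's card table for
  // scanning, returning the index of the first claimed card within the region.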
  uint claim_cards_to_scan(uint region, uint increment) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return Atomic::add(increment, &_card_table_scan_state[region]) - increment;
  }

  void add_dirty_region(uint const region) {
#ifdef ASSERT
    HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
    assert(!hr->in_collection_set() && hr->is_old_or_humongous_or_archive(),
           "Region %u is not suitable for scanning, is %sin collection set or %s",
           hr->hrm_index(), hr->in_collection_set() ? "" : "not ", hr->get_short_type_str());
#endif
    _next_dirty_regions->add_dirty_region(region);
  }

  void add_all_dirty_region(uint region) {
#ifdef ASSERT
    HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
    assert(hr->in_collection_set(),
           "Only add young regions to all dirty regions directly but %u is %s",
           hr->hrm_index(), hr->get_short_type_str());
#endif
    _all_dirty_regions->add_dirty_region(region);
  }

  void set_scan_top(uint region_idx, HeapWord* value) {
    _scan_top[region_idx] = value;
  }

  HeapWord* scan_top(uint region_idx) const {
    return _scan_top[region_idx];
  }

  void clear_scan_top(uint region_idx) {
    set_scan_top(region_idx, NULL);
  }
};

G1RemSet::G1RemSet(G1CollectedHeap* g1h,
                   G1CardTable* ct,
                   G1HotCardCache* hot_card_cache) :
  _scan_state(new G1RemSetScanState()),
  _prev_period_summary(false),
  _g1h(g1h),
  _ct(ct),
  _g1p(_g1h->policy()),
  _hot_card_cache(hot_card_cache) {
}

G1RemSet::~G1RemSet() {
  delete _scan_state;
}

uint G1RemSet::num_par_rem_sets() {
  return G1DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
}

void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
}

// Helper class to scan and detect ranges of cards that need to be scanned on the
// card table.
class G1CardTableScanner : public StackObj {
public:
  typedef CardTable::CardValue CardValue;

private:
  CardValue* const _base_addr;

  CardValue* _cur_addr;
  CardValue* const _end_addr;

  static const size_t ToScanMask = G1CardTable::g1_card_already_scanned;
  static const size_t ExpandedToScanMask = G1CardTable::WordAlreadyScanned;

  bool cur_addr_aligned() const {
    return ((uintptr_t)_cur_addr) % sizeof(size_t) == 0;
  }

  bool cur_card_is_dirty() const {
    CardValue value = *_cur_addr;
    return (value & ToScanMask) == 0;
  }

  bool cur_word_of_cards_contains_any_dirty_card() const {
    assert(cur_addr_aligned(), "Current address should be aligned");
    size_t const value = *(size_t*)_cur_addr;
    return (~value & ExpandedToScanMask) != 0;
  }

  bool cur_word_of_cards_all_dirty_cards() const {
    size_t const value = *(size_t*)_cur_addr;
    return value == G1CardTable::WordAllDirty;
  }

  size_t get_and_advance_pos() {
    _cur_addr++;
    return pointer_delta(_cur_addr, _base_addr, sizeof(CardValue)) - 1;
  }

public:
  G1CardTableScanner(CardValue* start_card, size_t size) :
    _base_addr(start_card),
    _cur_addr(start_card),
    _end_addr(start_card + size) {

    assert(is_aligned(start_card, sizeof(size_t)), "Unaligned start addr " PTR_FORMAT, p2i(start_card));
    assert(is_aligned(size, sizeof(size_t)), "Unaligned size " SIZE_FORMAT, size);
  }

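  // Returns the index (relative to the start of the scanned area) of the next dirty
  // card, or the size of the area if there is none. Checks a whole word of cards at a
  // time whenever the current address is suitably aligned.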
  size_t find_next_dirty() {
    while (!cur_addr_aligned()) {
      if (cur_card_is_dirty()) {
        return get_and_advance_pos();
      }
      _cur_addr++;
    }

    assert(cur_addr_aligned(), "Current address should be aligned now.");
    while (_cur_addr != _end_addr) {
      if (cur_word_of_cards_contains_any_dirty_card()) {
        for (size_t i = 0; i < sizeof(size_t); i++) {
          if (cur_card_is_dirty()) {
            return get_and_advance_pos();
          }
          _cur_addr++;
        }
        assert(false, "Should not reach here given we detected a dirty card in the word.");
      }
      _cur_addr += sizeof(size_t);
    }
    return get_and_advance_pos();
  }

  size_t find_next_non_dirty() {
    assert(_cur_addr <= _end_addr, "Not allowed to search for marks after area.");

    while (!cur_addr_aligned()) {
      if (!cur_card_is_dirty()) {
        return get_and_advance_pos();
      }
      _cur_addr++;
    }

    assert(cur_addr_aligned(), "Current address should be aligned now.");
    while (_cur_addr != _end_addr) {
      if (!cur_word_of_cards_all_dirty_cards()) {
        for (size_t i = 0; i < sizeof(size_t); i++) {
          if (!cur_card_is_dirty()) {
            return get_and_advance_pos();
          }
          _cur_addr++;
        }
        assert(false, "Should not reach here given we detected a non-dirty card in the word.");
      }
      _cur_addr += sizeof(size_t);
    }
    return get_and_advance_pos();
  }
};

// Helper class to claim dirty chunks within the card table.
class G1CardTableChunkClaimer {
  G1RemSetScanState* _scan_state;
  uint _region_idx;
  uint _cur_claim;

public:
  G1CardTableChunkClaimer(G1RemSetScanState* scan_state, uint region_idx) :
    _scan_state(scan_state),
    _region_idx(region_idx),
    _cur_claim(0) {
    guarantee(size() <= HeapRegion::CardsPerRegion, "Should not claim more space than possible.");
  }

  bool has_next() {
    while (true) {
      _cur_claim = _scan_state->claim_cards_to_scan(_region_idx, size());
      if (_cur_claim >= HeapRegion::CardsPerRegion) {
        return false;
      }
      if (_scan_state->chunk_needs_scan(_region_idx, _cur_claim)) {
        return true;
      }
    }
  }

  uint value() const { return _cur_claim; }
  uint size() const { return _scan_state->scan_chunk_size(); }
};

// Scans a heap region for dirty cards.
class G1ScanHRForRegionClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  G1CardTable* _ct;
  G1BlockOffsetTable* _bot;

  G1ParScanThreadState* _pss;

  G1RemSetScanState* _scan_state;

  G1GCPhaseTimes::GCParPhases _phase;

  uint   _worker_id;

  size_t _cards_scanned;
  size_t _blocks_scanned;
  size_t _chunks_claimed;

  Tickspan _rem_set_root_scan_time;
  Tickspan _rem_set_trim_partially_time;

  // The address up to which this thread has already scanned (walked the heap) during
  // card scanning (exclusive).
  HeapWord* _scanned_to;

  HeapWord* scan_memregion(uint region_idx_for_card, MemRegion mr) {
    HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
    G1ScanCardClosure card_cl(_g1h, _pss);

    HeapWord* const scanned_to = card_region->oops_on_memregion_seq_iterate_careful<true>(mr, &card_cl);
    assert(scanned_to != NULL, "Should be able to scan range");
    assert(scanned_to >= mr.end(), "Scanned to " PTR_FORMAT " less than range " PTR_FORMAT, p2i(scanned_to), p2i(mr.end()));

    _pss->trim_queue_partially();
    return scanned_to;
  }

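  // Scan the given block of cards, clipping the corresponding memory range to the
  // region's scan_top and to the area this thread has already scanned.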
  void do_claimed_block(uint const region_idx_for_card, size_t const first_card, size_t const num_cards) {
    HeapWord* const card_start = _bot->address_for_index_raw(first_card);
#ifdef ASSERT
    HeapRegion* hr = _g1h->region_at_or_null(region_idx_for_card);
    assert(hr == NULL || hr->is_in_reserved(card_start),
           "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index());
#endif
    HeapWord* const top = _scan_state->scan_top(region_idx_for_card);
    if (card_start >= top) {
      return;
    }

    HeapWord* scan_end = MIN2(card_start + (num_cards << BOTConstants::LogN_words), top);
    if (_scanned_to >= scan_end) {
      return;
    }
    MemRegion mr(MAX2(card_start, _scanned_to), scan_end);
    _scanned_to = scan_memregion(region_idx_for_card, mr);

    _cards_scanned += num_cards;
  }

  ALWAYSINLINE void do_card_block(uint const region_idx, size_t const first_card, size_t const num_cards) {
    _ct->mark_as_scanned(first_card, num_cards);
    do_claimed_block(region_idx, first_card, num_cards);
    _blocks_scanned++;
  }

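  // Scan all dirty chunks of the given region's card table, claiming chunks until the
  // whole region has been claimed by some worker.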
  void scan_heap_roots(HeapRegion* r) {
    EventGCPhaseParallel event;
    uint const region_idx = r->hrm_index();

    ResourceMark rm;

    G1CardTableChunkClaimer claim(_scan_state, region_idx);

    // Set the current scan "finger" to NULL for every heap region to scan. Since
    // the claim value is monotonically increasing, the check to not scan below this
    // will filter out objects spanning chunks within the region too then, as opposed
    // to resetting this value for every claim.
    _scanned_to = NULL;

    while (claim.has_next()) {
      size_t const region_card_base_idx = ((size_t)region_idx << HeapRegion::LogCardsPerRegion) + claim.value();
      CardTable::CardValue* const base_addr = _ct->byte_for_index(region_card_base_idx);

      G1CardTableScanner scan(base_addr, claim.size());

      size_t first_scan_idx = scan.find_next_dirty();
      while (first_scan_idx != claim.size()) {
        assert(*_ct->byte_for_index(region_card_base_idx + first_scan_idx) <= 0x1, "is %d at region %u idx " SIZE_FORMAT, *_ct->byte_for_index(region_card_base_idx + first_scan_idx), region_idx, first_scan_idx);

        size_t const last_scan_idx = scan.find_next_non_dirty();
        size_t const len = last_scan_idx - first_scan_idx;

        do_card_block(region_idx, region_card_base_idx + first_scan_idx, len);

        if (last_scan_idx == claim.size()) {
          break;
        }

        first_scan_idx = scan.find_next_dirty();
      }
      _chunks_claimed++;
    }

    event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ScanHR));
  }

public:
  G1ScanHRForRegionClosure(G1RemSetScanState* scan_state,
                           G1ParScanThreadState* pss,
                           uint worker_id,
                           G1GCPhaseTimes::GCParPhases phase) :
    _g1h(G1CollectedHeap::heap()),
    _ct(_g1h->card_table()),
    _bot(_g1h->bot()),
    _pss(pss),
    _scan_state(scan_state),
    _phase(phase),
    _worker_id(worker_id),
    _cards_scanned(0),
    _blocks_scanned(0),
    _chunks_claimed(0),
    _rem_set_root_scan_time(),
    _rem_set_trim_partially_time(),
    _scanned_to(NULL) {
  }

  bool do_heap_region(HeapRegion* r) {
    assert(!r->in_collection_set() && r->is_old_or_humongous_or_archive(),
           "Should only be called on old gen non-collection set regions but region %u is not.",
           r->hrm_index());
    uint const region_idx = r->hrm_index();

    if (_scan_state->has_cards_to_scan(region_idx)) {
      G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time);
      scan_heap_roots(r);
    }
    return false;
  }

  Tickspan rem_set_root_scan_time() const { return _rem_set_root_scan_time; }
  Tickspan rem_set_trim_partially_time() const { return _rem_set_trim_partially_time; }

  size_t cards_scanned() const { return _cards_scanned; }
  size_t blocks_scanned() const { return _blocks_scanned; }
  size_t chunks_claimed() const { return _chunks_claimed; }
};

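// Scan the previously merged heap roots (dirty cards) for the given worker and record
// the scan time and work items in the corresponding phases.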
void G1RemSet::scan_heap_roots(G1ParScanThreadState* pss,
                               uint worker_id,
                               G1GCPhaseTimes::GCParPhases scan_phase,
                               G1GCPhaseTimes::GCParPhases objcopy_phase) {
  G1ScanHRForRegionClosure cl(_scan_state, pss, worker_id, scan_phase);
  _scan_state->iterate_dirty_regions_from(&cl, worker_id);

  G1GCPhaseTimes* p = _g1p->phase_times();

  p->record_or_add_time_secs(objcopy_phase, worker_id, cl.rem_set_trim_partially_time().seconds());

  p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_root_scan_time().seconds());
  p->record_or_add_thread_work_item(scan_phase, worker_id, cl.cards_scanned(), G1GCPhaseTimes::ScanHRScannedCards);
  p->record_or_add_thread_work_item(scan_phase, worker_id, cl.blocks_scanned(), G1GCPhaseTimes::ScanHRScannedBlocks);
  p->record_or_add_thread_work_item(scan_phase, worker_id, cl.chunks_claimed(), G1GCPhaseTimes::ScanHRClaimedChunks);
}

// Heap region closure to be applied to all regions in the current collection set
// increment to fix up non-card related roots.
class G1ScanCollectionSetRegionClosure : public HeapRegionClosure {
  G1ParScanThreadState* _pss;
  G1RemSetScanState* _scan_state;

  G1GCPhaseTimes::GCParPhases _scan_phase;
  G1GCPhaseTimes::GCParPhases _code_roots_phase;

  uint _worker_id;

  size_t _opt_refs_scanned;
  size_t _opt_refs_memory_used;

  Tickspan _strong_code_root_scan_time;
  Tickspan _strong_code_trim_partially_time;

  Tickspan _rem_set_opt_root_scan_time;
  Tickspan _rem_set_opt_trim_partially_time;

  void scan_opt_rem_set_roots(HeapRegion* r) {
    EventGCPhaseParallel event;

    G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r);

    G1ScanCardClosure scan_cl(G1CollectedHeap::heap(), _pss);
    G1ScanRSForOptionalClosure cl(G1CollectedHeap::heap(), &scan_cl);
    _opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->strong_oops());
    _opt_refs_memory_used += opt_rem_set_list->used_memory();

    event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_scan_phase));
  }

public:
  G1ScanCollectionSetRegionClosure(G1RemSetScanState* scan_state,
                                   G1ParScanThreadState* pss,
                                   uint worker_id,
                                   G1GCPhaseTimes::GCParPhases scan_phase,
                                   G1GCPhaseTimes::GCParPhases code_roots_phase) :
    _pss(pss),
    _scan_state(scan_state),
    _scan_phase(scan_phase),
    _code_roots_phase(code_roots_phase),
    _worker_id(worker_id),
    _opt_refs_scanned(0),
    _opt_refs_memory_used(0),
    _strong_code_root_scan_time(),
    _strong_code_trim_partially_time(),
    _rem_set_opt_root_scan_time(),
    _rem_set_opt_trim_partially_time() { }

  bool do_heap_region(HeapRegion* r) {
    uint const region_idx = r->hrm_index();

    // The individual references for the optional remembered set are per-worker, so we
    // always need to scan them.
    if (r->has_index_in_opt_cset()) {
      G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_opt_root_scan_time, _rem_set_opt_trim_partially_time);
      scan_opt_rem_set_roots(r);
    }

    if (_scan_state->claim_collection_set_region(region_idx)) {
      EventGCPhaseParallel event;

      G1EvacPhaseWithTrimTimeTracker timer(_pss, _strong_code_root_scan_time, _strong_code_trim_partially_time);
      // Scan the strong code root list attached to the current region
      r->strong_code_roots_do(_pss->closures()->weak_codeblobs());

      event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_code_roots_phase));
    }

    return false;
  }

  Tickspan strong_code_root_scan_time() const { return _strong_code_root_scan_time; }
  Tickspan strong_code_root_trim_partially_time() const { return _strong_code_trim_partially_time; }

  Tickspan rem_set_opt_root_scan_time() const { return _rem_set_opt_root_scan_time; }
  Tickspan rem_set_opt_trim_partially_time() const { return _rem_set_opt_trim_partially_time; }

  size_t opt_refs_scanned() const { return _opt_refs_scanned; }
  size_t opt_refs_memory_used() const { return _opt_refs_memory_used; }
};

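// Scan the non-card based roots (optional remembered sets and strong code roots) of all
// regions in the current collection set increment and record the timings.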
void G1RemSet::scan_collection_set_regions(G1ParScanThreadState* pss,
                                           uint worker_id,
                                           G1GCPhaseTimes::GCParPhases scan_phase,
                                           G1GCPhaseTimes::GCParPhases coderoots_phase,
                                           G1GCPhaseTimes::GCParPhases objcopy_phase) {
  G1ScanCollectionSetRegionClosure cl(_scan_state, pss, worker_id, scan_phase, coderoots_phase);
  _g1h->collection_set_iterate_increment_from(&cl, worker_id);

  G1GCPhaseTimes* p = _g1h->phase_times();

  p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_opt_root_scan_time().seconds());
  p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_opt_trim_partially_time().seconds());

  p->record_or_add_time_secs(coderoots_phase, worker_id, cl.strong_code_root_scan_time().seconds());
  p->add_time_secs(objcopy_phase, worker_id, cl.strong_code_root_trim_partially_time().seconds());

  // At this time we record some metrics only for the evacuations after the initial one.
  if (scan_phase == G1GCPhaseTimes::OptScanHR) {
    p->record_or_add_thread_work_item(scan_phase, worker_id, cl.opt_refs_scanned(), G1GCPhaseTimes::ScanHRScannedOptRefs);
    p->record_or_add_thread_work_item(scan_phase, worker_id, cl.opt_refs_memory_used(), G1GCPhaseTimes::ScanHRUsedMemory);
  }
}

void G1RemSet::prepare_for_scan_heap_roots() {
  G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
  dcqs.concatenate_logs();

  _scan_state->prepare();
}

class G1MergeHeapRootsTask : public AbstractGangTask {

  // Visitor for remembered sets, dropping entries onto the card table.
  class G1MergeCardSetClosure : public HeapRegionClosure {
    G1RemSetScanState* _scan_state;
    G1CardTable* _ct;

    uint _merged_sparse;
    uint _merged_fine;
    uint _merged_coarse;

    // Returns whether the region contains cards we need to scan. If so, remember that
    // region in the current set of dirty regions.
    bool remember_if_interesting(uint const region_idx) {
      if (!_scan_state->contains_cards_to_process(region_idx)) {
        return false;
      }
      _scan_state->add_dirty_region(region_idx);
      return true;
    }
  public:
    G1MergeCardSetClosure(G1RemSetScanState* scan_state) :
      _scan_state(scan_state),
      _ct(G1CollectedHeap::heap()->card_table()),
      _merged_sparse(0),
      _merged_fine(0),
      _merged_coarse(0) { }

    void next_coarse_prt(uint const region_idx) {
      if (!remember_if_interesting(region_idx)) {
        return;
      }

      _merged_coarse++;

      size_t region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
      _ct->mark_region_dirty(region_base_idx, HeapRegion::CardsPerRegion);
      _scan_state->set_chunk_region_dirty(region_base_idx);
    }

    void next_fine_prt(uint const region_idx, BitMap* bm) {
      if (!remember_if_interesting(region_idx)) {
        return;
      }

      _merged_fine++;

      size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
      BitMap::idx_t cur = bm->get_next_one_offset(0);
      while (cur != bm->size()) {
        _ct->mark_clean_as_dirty(region_base_idx + cur);
        _scan_state->set_chunk_dirty(region_base_idx + cur);
        cur = bm->get_next_one_offset(cur + 1);
      }
    }

    void next_sparse_prt(uint const region_idx, SparsePRTEntry::card_elem_t* cards, uint const num_cards) {
      if (!remember_if_interesting(region_idx)) {
        return;
      }

      _merged_sparse++;

      size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
      for (uint i = 0; i < num_cards; i++) {
        size_t card_idx = region_base_idx + cards[i];
        _ct->mark_clean_as_dirty(card_idx);
        _scan_state->set_chunk_dirty(card_idx);
      }
    }

    virtual bool do_heap_region(HeapRegion* r) {
      assert(r->in_collection_set() || r->is_starts_humongous(), "must be");

      HeapRegionRemSet* rem_set = r->rem_set();
      if (!rem_set->is_empty()) {
        rem_set->iterate_prts(*this);
      }

      return false;
    }

    size_t merged_sparse() const { return _merged_sparse; }
    size_t merged_fine() const { return _merged_fine; }
    size_t merged_coarse() const { return _merged_coarse; }
  };

  // Visitor for the remembered sets of humongous candidate regions to merge their
  // remembered set into the card table.
  class G1FlushHumongousCandidateRemSets : public HeapRegionClosure {
    G1MergeCardSetClosure _cl;

  public:
    G1FlushHumongousCandidateRemSets(G1RemSetScanState* scan_state) : _cl(scan_state) { }

    virtual bool do_heap_region(HeapRegion* r) {
      G1CollectedHeap* g1h = G1CollectedHeap::heap();

      if (!r->is_starts_humongous() ||
          !g1h->region_attr(r->hrm_index()).is_humongous() ||
          r->rem_set()->is_empty()) {
        return false;
      }

      guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
                "Found a not-small remembered set here. This is inconsistent with previous assumptions.");

      _cl.do_heap_region(r);

      // We should only clear the card based remembered set here as we will not
      // implicitly rebuild anything else during eager reclaim. Note that at the moment
      // (and probably never) we do not enter this path if there are other kinds of
      // remembered sets for this region.
      r->rem_set()->clear_locked(true /* only_cardset */);
      // Clear_locked() above sets the state to Empty. However we want to continue
      // collecting remembered set entries for humongous regions that were not
      // reclaimed.
      r->rem_set()->set_state_complete();
#ifdef ASSERT
      G1HeapRegionAttr region_attr = g1h->region_attr(r->hrm_index());
      assert(region_attr.needs_remset_update(), "must be");
#endif
      assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");

      return false;
    }

    size_t merged_sparse() const { return _cl.merged_sparse(); }
    size_t merged_fine() const { return _cl.merged_fine(); }
    size_t merged_coarse() const { return _cl.merged_coarse(); }
  };

  // Visitor for the log buffer entries to merge them into the card table.
  class G1MergeLogBufferCardsClosure : public G1CardTableEntryClosure {
    G1RemSetScanState* _scan_state;
    G1CardTable* _ct;

    size_t _cards_dirty;
    size_t _cards_skipped;
  public:
    G1MergeLogBufferCardsClosure(G1CollectedHeap* g1h, G1RemSetScanState* scan_state) :
      _scan_state(scan_state), _ct(g1h->card_table()), _cards_dirty(0), _cards_skipped(0)
    {}

    void do_card_ptr(CardValue* card_ptr, uint worker_id) {
      // The only time we care about recording cards that
      // contain references that point into the collection set
      // is during RSet updating within an evacuation pause.
      // In this case worker_id should be the id of a GC worker thread.
      assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");

      uint const region_idx = _ct->region_idx_for(card_ptr);

      // The second clause must come after the first - the log buffers might contain cards
      // to uncommitted regions.
      // This code may count duplicate entries in the log buffers (even if rare) multiple
      // times.
      if (_scan_state->contains_cards_to_process(region_idx) && (*card_ptr == G1CardTable::dirty_card_val())) {
        _scan_state->add_dirty_region(region_idx);
        _scan_state->set_chunk_dirty(_ct->index_for_cardvalue(card_ptr));
        _cards_dirty++;
      } else {
        // We may have had dirty cards in the (initial) collection set (or the
        // young regions which are always in the initial collection set). We do
        // not fix their cards here: we already added these regions to the set of
        // regions to clear the card table at the end during the prepare() phase.
        _cards_skipped++;
      }
    }

    size_t cards_dirty() const { return _cards_dirty; }
    size_t cards_skipped() const { return _cards_skipped; }
  };

  HeapRegionClaimer _hr_claimer;
  G1RemSetScanState* _scan_state;
  BufferNode::Stack _dirty_card_buffers;
  bool _initial_evacuation;

  volatile bool _fast_reclaim_handled;

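  // Apply the given closure to all dirty card log buffers claimed from the shared
  // stack, deallocating each buffer after it has been processed.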
  void apply_closure_to_dirty_card_buffers(G1MergeLogBufferCardsClosure* cl, uint worker_id) {
    G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
    size_t buffer_size = dcqs.buffer_size();
    while (BufferNode* node = _dirty_card_buffers.pop()) {
      cl->apply_to_buffer(node, buffer_size, worker_id);
      dcqs.deallocate_buffer(node);
    }
  }

public:
  G1MergeHeapRootsTask(G1RemSetScanState* scan_state, uint num_workers, bool initial_evacuation) :
    AbstractGangTask("G1 Merge Heap Roots"),
    _hr_claimer(num_workers),
    _scan_state(scan_state),
    _dirty_card_buffers(),
    _initial_evacuation(initial_evacuation),
    _fast_reclaim_handled(false)
  {
    if (initial_evacuation) {
      G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
      G1BufferNodeList buffers = dcqs.take_all_completed_buffers();
      if (buffers._entry_count != 0) {
        _dirty_card_buffers.prepend(*buffers._head, *buffers._tail);
      }
    }
  }

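  // Merge all sources of cards for this worker onto the card table: the remembered sets
  // of humongous eager-reclaim candidates, the remembered sets of the current collection
  // set increment, and (for the initial evacuation only) the hot card cache and the
  // dirty card log buffers.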
  virtual void work(uint worker_id) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    G1GCPhaseTimes* p = g1h->phase_times();

    G1GCPhaseTimes::GCParPhases merge_remset_phase = _initial_evacuation ?
                                                     G1GCPhaseTimes::MergeRS :
                                                     G1GCPhaseTimes::OptMergeRS;

    // We schedule flushing the remembered sets of humongous fast reclaim candidates
    // onto the card table first to allow the remaining parallelized tasks to hide it.
    if (_initial_evacuation &&
        p->fast_reclaim_humongous_candidates() > 0 &&
        !_fast_reclaim_handled &&
        !Atomic::cmpxchg(true, &_fast_reclaim_handled, false)) {

      G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeER, worker_id);

      G1FlushHumongousCandidateRemSets cl(_scan_state);
      g1h->heap_region_iterate(&cl);

      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
    }

    // Merge remembered sets of current candidates.
    {
      G1GCParPhaseTimesTracker x(p, merge_remset_phase, worker_id, _initial_evacuation /* must_record */);
      G1MergeCardSetClosure cl(_scan_state);
      g1h->collection_set_iterate_increment_from(&cl, &_hr_claimer, worker_id);

      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
      p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
    }

    // Apply closure to log entries in the HCC.
    if (_initial_evacuation && G1HotCardCache::default_use_cache()) {
      assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
      G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeHCC, worker_id);
      G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
      g1h->iterate_hcc_closure(&cl, worker_id);

      p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeHCCDirtyCards);
      p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeHCCSkippedCards);
    }

    // Now apply the closure to all remaining log entries.
    if (_initial_evacuation) {
      assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
      G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeLB, worker_id);

      G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
      apply_closure_to_dirty_card_buffers(&cl, worker_id);

      p->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeLBDirtyCards);
      p->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeLBSkippedCards);
    }
  }
};

void G1RemSet::print_merge_heap_roots_stats() {
  size_t num_visited_cards = _scan_state->num_visited_cards();

  size_t total_dirty_region_cards = _scan_state->num_cards_in_dirty_regions();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  size_t total_old_region_cards =
    (g1h->num_regions() - (g1h->num_free_regions() - g1h->collection_set()->cur_length())) * HeapRegion::CardsPerRegion;

  log_debug(gc,remset)("Visited cards " SIZE_FORMAT " Total dirty " SIZE_FORMAT " (%.2lf%%) Total old " SIZE_FORMAT " (%.2lf%%)",
                       num_visited_cards,
                       total_dirty_region_cards,
                       percent_of(num_visited_cards, total_dirty_region_cards),
                       total_old_region_cards,
                       percent_of(num_visited_cards, total_old_region_cards));
}

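// Merge all heap root sources onto the card table before scanning. For evacuation
// passes after the initial one the number of workers is limited by the length of the
// current collection set increment.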
void G1RemSet::merge_heap_roots(bool initial_evacuation) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  {
    Ticks start = Ticks::now();

    _scan_state->prepare_for_merge_heap_roots();

    Tickspan total = Ticks::now() - start;
    if (initial_evacuation) {
      g1h->phase_times()->record_prepare_merge_heap_roots_time(total.seconds() * 1000.0);
    } else {
      g1h->phase_times()->record_or_add_optional_prepare_merge_heap_roots_time(total.seconds() * 1000.0);
    }
  }

  WorkGang* workers = g1h->workers();
  size_t const increment_length = g1h->collection_set()->increment_length();

  uint const num_workers = initial_evacuation ? workers->active_workers() :
                                                MIN2(workers->active_workers(), (uint)increment_length);

  {
    G1MergeHeapRootsTask cl(_scan_state, num_workers, initial_evacuation);
    log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " regions",
                        cl.name(), num_workers, increment_length);
    workers->run_task(&cl, num_workers);
  }

  if (log_is_enabled(Debug, gc, remset)) {
    print_merge_heap_roots_stats();
  }
}

void G1RemSet::prepare_for_scan_heap_roots(uint region_idx) {
  _scan_state->clear_scan_top(region_idx);
}

void G1RemSet::cleanup_after_scan_heap_roots() {
  G1GCPhaseTimes* phase_times = _g1h->phase_times();

  // Set all cards back to clean.
  double start = os::elapsedTime();
  _scan_state->cleanup(_g1h->workers());
  phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
}

inline void check_card_ptr(CardTable::CardValue* card_ptr, G1CardTable* ct) {
#ifdef ASSERT
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
         "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
         p2i(card_ptr),
         ct->index_for(ct->addr_for(card_ptr)),
         p2i(ct->addr_for(card_ptr)),
         g1h->addr_to_region(ct->addr_for(card_ptr)));
#endif
}

bool G1RemSet::clean_card_before_refine(CardValue*& card_ptr
                                        DEBUG_ONLY(COMMA KVHashtable<CardTable::CardValue* COMMA HeapWord* COMMA mtGC>& top_map)) {
  assert(!_g1h->is_gc_active(), "Only call concurrently");

  // Find the start address represented by the card.
  HeapWord* start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1h->heap_region_containing_or_null(start);

  // If this is a (stale) card into an uncommitted region, exit.
  if (r == NULL) {
    return false;
  }

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do.
  // We cannot load the card value before the "r == NULL" check, because G1
  // could uncommit parts of the card table covering uncommitted regions.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return false;
  }

  // This check is needed for some uncommon cases where we should
  // ignore the card.
  //
  // The region could be young.  Cards for young regions are
  // distinctly marked (set to g1_young_gen), so the post-barrier will
  // filter them out.  However, that marking is performed
  // concurrently.  A write to a young object could occur before the
  // card has been marked young, slipping past the filter.
  //
  // The card could be stale, because the region has been freed since
  // the card was recorded. In this case the region type could be
  // anything.  If (still) free or (reallocated) young, just ignore
  // it.  If (reallocated) old or humongous, the later card trimming
  // and additional checks in iteration may detect staleness.  At
  // worst, we end up processing a stale card unnecessarily.
  //
  // In the normal (non-stale) case, the synchronization between the
  // enqueueing of the card and processing it here will have ensured
  // we see the up-to-date region type here.
  if (!r->is_old_or_humongous_or_archive()) {
    return false;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
  //

  if (_hot_card_cache->use_cache()) {
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    const CardValue* orig_card_ptr = card_ptr;
    card_ptr = _hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return false;
    } else if (card_ptr != orig_card_ptr) {
      // Original card was inserted and an old card was evicted.
      start = _ct->addr_for(card_ptr);
      r = _g1h->heap_region_containing(start);

      // Check whether the region formerly in the cache should be
      // ignored, as discussed earlier for the original card.  The
      // region could have been freed while in the cache.
      if (!r->is_old_or_humongous_or_archive()) {
        return false;
      }
    } // Else we still have the original card.
  }
1340 
1341   // Trim the region designated by the card to what's been allocated
1342   // in the region.  The card could be stale, or the card could cover
1343   // (part of) an object at the end of the allocated space and extend
1344   // beyond the end of allocation.
1345 
1346   // Non-humongous objects are only allocated in the old-gen during
1347   // GC, so if region is old then top is stable.  Humongous object
1348   // allocation sets top last; if top has not yet been set, this is
1349   // a stale card and we'll end up with an empty intersection.  If
1350   // this is not a stale card, the synchronization between the
1351   // enqueuing of the card and processing it here will have ensured
1352   // we see the up-to-date top here.
1353   HeapWord* scan_limit = r->top();
1354 
1355   if (scan_limit <= start) {
1356     // If the trimmed region is empty, the card must be stale.
1357     return false;
1358   }
1359 
1360   // Okay to clean and process the card now.  There are still some
1361   // stale card cases that may be detected by iteration and dealt with
1362   // as iteration failure.
1363   *const_cast<volatile CardValue*>(card_ptr) = G1CardTable::clean_card_val();
1364 
1365 #ifdef ASSERT
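       // Debug only: remember the scan limit observed while cleaning this card so
       // that refine_card_concurrently() can verify that top has not changed in
       // the meantime.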
1366   HeapWord** existing_top = top_map.lookup(card_ptr);
1367   if (existing_top != NULL) {
1368     assert(scan_limit == *existing_top, "top must be stable");
1369   } else {
1370     top_map.add(card_ptr, scan_limit);
1371   }
1372 #endif
1373 
1374   return true;
1375 }
1376 
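     // Refines a card that has previously been cleaned by clean_card_before_refine()
     // by applying G1ConcurrentRefineOopClosure to all objects it covers.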
1377 void G1RemSet::refine_card_concurrently(CardValue* const card_ptr,
1378                                         const uint worker_id
1379                                         DEBUG_ONLY(COMMA KVHashtable<CardTable::CardValue* COMMA HeapWord* COMMA mtGC>& top_map)) {
1380   assert(!_g1h->is_gc_active(), "Only call concurrently");
1381   check_card_ptr(card_ptr, _ct);
1382 
1383   // Construct the MemRegion representing the card.
1384   HeapWord* start = _ct->addr_for(card_ptr);
1385   // And find the region containing it.
1386   HeapRegion* r = _g1h->heap_region_containing(start);
1387   // This reload of the top is safe even though it happens after the full
1388   // fence, because top is stable for old and unfiltered humongous regions,
1389   // so it must return the same value as the previous load when cleaning the card.
1390   HeapWord* scan_limit = r->top();
1391   assert(scan_limit == *top_map.lookup(card_ptr), "top must be stable");
1392 
1393   // Don't use addr_for(card_ptr + 1) which can ask for
1394   // a card beyond the heap.
1395   HeapWord* end = start + G1CardTable::card_size_in_words;
1396   MemRegion dirty_region(start, MIN2(scan_limit, end));
1397   assert(!dirty_region.is_empty(), "sanity");
1398 
1399   G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_id);
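       // A non-NULL result means the whole dirty region was scanned successfully.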
1400   if (r->oops_on_memregion_seq_iterate_careful<false>(dirty_region, &conc_refine_cl) != NULL) {
1401     return;
1402   }
1403 
1404   // If unable to process the card then we encountered an unparsable
1405   // part of the heap (e.g. a partially allocated object, so only
1406   // temporarily a problem) while processing a stale card.  Despite
1407   // the card being stale, we can't simply ignore it, because we've
1408   // already marked the card as cleaned, and so have taken responsibility
1409   // for ensuring the card gets scanned.
1410   //
1411   // However, the card might have gotten re-dirtied and re-enqueued
1412   // while we worked.  (In fact, it's pretty likely.)
1413   if (*card_ptr == G1CardTable::dirty_card_val()) {
1414     return;
1415   }
1416 
1417   // Re-dirty the card and enqueue in the *shared* queue.  Can't use
1418   // the thread-local queue, because that might be the queue that is
1419   // being processed by us; we could be a Java thread conscripted to
1420   // perform refinement on our queue's current buffer.
1421   *card_ptr = G1CardTable::dirty_card_val();
1422   G1BarrierSet::shared_dirty_card_queue().enqueue(card_ptr);
1423 }
1424 
1425 void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
1426   if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
1427       (period_count % G1SummarizeRSetStatsPeriod == 0)) {
1428 
1429     G1RemSetSummary current;
1430     _prev_period_summary.subtract_from(&current);
1431 
1432     Log(gc, remset) log;
1433     log.trace("%s", header);
1434     ResourceMark rm;
1435     LogStream ls(log.trace());
1436     _prev_period_summary.print_on(&ls);
1437 
1438     _prev_period_summary.set(&current);
1439   }
1440 }
1441 
1442 void G1RemSet::print_summary_info() {
1443   Log(gc, remset, exit) log;
1444   if (log.is_trace()) {
1445     log.trace(" Cumulative RS summary");
1446     G1RemSetSummary current;
1447     ResourceMark rm;
1448     LogStream ls(log.trace());
1449     current.print_on(&ls);
1450   }
1451 }
1452 
1453 class G1RebuildRemSetTask: public AbstractGangTask {
1454   // Rebuilds the remembered set for a single region, chunk by chunk, based
1455   // on the liveness information gathered during concurrent marking.
1456   class G1RebuildRemSetHeapRegionClosure : public HeapRegionClosure {
1457     G1ConcurrentMark* _cm;
1458     G1RebuildRemSetClosure _update_cl;
1459 
1460     // Applies _update_cl to the references of the given object, limiting objArrays
1461     // to the given MemRegion. Returns the number of words actually scanned.
1462     size_t scan_for_references(oop const obj, MemRegion mr) {
1463       size_t const obj_size = obj->size();
1464       // All non-objArrays and objArrays completely within the mr
1465       // can be scanned without passing the mr.
1466       if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) {
1467         obj->oop_iterate(&_update_cl);
1468         return obj_size;
1469       }
1470       // This path is for objArrays crossing the given MemRegion. Only scan the
1471       // area within the MemRegion.
1472       obj->oop_iterate(&_update_cl, mr);
1473       return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size();
1474     }
1475 
1476     // A humongous object is live (with respect to the scanning) if either
1477     // a) it is marked on the bitmap as such, or
1478     // b) its TARS is larger than TAMS, i.e. it has been allocated during marking.
1479     bool is_humongous_live(oop const humongous_obj, const G1CMBitMap* const bitmap, HeapWord* tams, HeapWord* tars) const {
1480       return bitmap->is_marked(humongous_obj) || (tars > tams);
1481     }
1482 
1483     // Iterator over the live objects within the given MemRegion.
1484     class LiveObjIterator : public StackObj {
1485       const G1CMBitMap* const _bitmap;
1486       const HeapWord* _tams;
1487       const MemRegion _mr;
1488       HeapWord* _current;
1489 
1490       bool is_below_tams() const {
1491         return _current < _tams;
1492       }
1493 
1494       bool is_live(HeapWord* obj) const {
1495         return !is_below_tams() || _bitmap->is_marked(obj);
1496       }
1497 
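           // Liveness information from the bitmap is only valid below TAMS, so never
           // search beyond TAMS or the end of the MemRegion.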
1498       HeapWord* bitmap_limit() const {
1499         return MIN2(const_cast<HeapWord*>(_tams), _mr.end());
1500       }
1501 
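           // Below TAMS liveness is determined by the bitmap, so advance _current to
           // the next marked address within bitmap_limit().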
1502       void move_if_below_tams() {
1503         if (is_below_tams() && has_next()) {
1504           _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
1505         }
1506       }
1507     public:
1508       LiveObjIterator(const G1CMBitMap* const bitmap, const HeapWord* tams, const MemRegion mr, HeapWord* first_oop_into_mr) :
1509           _bitmap(bitmap),
1510           _tams(tams),
1511           _mr(mr),
1512           _current(first_oop_into_mr) {
1513 
1514         assert(_current <= _mr.start(),
1515                "First oop " PTR_FORMAT " should extend into mr [" PTR_FORMAT ", " PTR_FORMAT ")",
1516                p2i(first_oop_into_mr), p2i(mr.start()), p2i(mr.end()));
1517 
1518         // Step to the next live object within the MemRegion if needed.
1519         if (is_live(_current)) {
1520           // Non-objArrays starting before mr were already scanned completely
               // while processing the previous part of the region, so skip them here.
1521           if (_current < mr.start() && !oop(_current)->is_objArray()) {
1522             _current += oop(_current)->size();
1523             // We might have positioned _current on a non-live object. Reposition to the next
1524             // live one if needed.
1525             move_if_below_tams();
1526           }
1527         } else {
1528           // The object at _current can only be dead if below TAMS, so we can use
1529           // the bitmap immediately.
1530           _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
1531           assert(_current == _mr.end() || is_live(_current),
1532                  "Current " PTR_FORMAT " should be live (%s) or beyond the end of the MemRegion (" PTR_FORMAT ")",
1533                  p2i(_current), BOOL_TO_STR(is_live(_current)), p2i(_mr.end()));
1534         }
1535       }
1536 
1537       void move_to_next() {
1538         _current += next()->size();
1539         move_if_below_tams();
1540       }
1541 
1542       oop next() const {
1543         oop result = oop(_current);
1544         assert(is_live(_current),
1545                "Object " PTR_FORMAT " must be live TAMS " PTR_FORMAT " below %d mr " PTR_FORMAT " " PTR_FORMAT " outside %d",
1546                p2i(_current), p2i(_tams), _tams > _current, p2i(_mr.start()), p2i(_mr.end()), _mr.contains(result));
1547         return result;
1548       }
1549 
1550       bool has_next() const {
1551         return _current < _mr.end();
1552       }
1553     };
1554 
1555     // Rebuild remembered sets in the part of the region specified by mr and hr.
1556     // Objects between the bottom of the region and the TAMS are checked for liveness
1557     // using the given bitmap. Objects between TAMS and TARS are assumed to be live.
1558     // Returns the number of live bytes between bottom and TAMS.
1559     size_t rebuild_rem_set_in_region(const G1CMBitMap* const bitmap,
1560                                      HeapWord* const top_at_mark_start,
1561                                      HeapWord* const top_at_rebuild_start,
1562                                      HeapRegion* hr,
1563                                      MemRegion mr) {
1564       size_t marked_words = 0;
1565 
1566       if (hr->is_humongous()) {
1567         oop const humongous_obj = oop(hr->humongous_start_region()->bottom());
1568         if (is_humongous_live(humongous_obj, bitmap, top_at_mark_start, top_at_rebuild_start)) {
1569           // We need to scan both [bottom, TAMS) and [TAMS, top_at_rebuild_start);
1570           // however in case of humongous objects it is sufficient to scan the encompassing
1571           // area (top_at_rebuild_start is always greater than or equal to TAMS) as one of the
1572           // two areas will be zero sized. I.e. TAMS is either
1573           // the same as bottom or top(_at_rebuild_start). There is no way TAMS has a different
1574           // value: this would mean that TAMS points somewhere into the object.
1575           assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
1576                  "More than one object in the humongous region?");
1577           humongous_obj->oop_iterate(&_update_cl, mr);
1578           return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion((HeapWord*)humongous_obj, humongous_obj->size())).byte_size() : 0;
1579         } else {
1580           return 0;
1581         }
1582       }
1583 
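           // Visit all live objects overlapping mr, starting from the object that
           // covers mr.start() as determined by the block offset table.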
1584       for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
1585         oop obj = it.next();
1586         size_t scanned_size = scan_for_references(obj, mr);
1587         if ((HeapWord*)obj < top_at_mark_start) {
1588           marked_words += scanned_size;
1589         }
1590       }
1591 
1592       return marked_words * HeapWordSize;
1593     }
1594   public:
1595     G1RebuildRemSetHeapRegionClosure(G1CollectedHeap* g1h,
1596                                      G1ConcurrentMark* cm,
1597                                      uint worker_id) :
1598       HeapRegionClosure(),
1599       _cm(cm),
1600       _update_cl(g1h, worker_id) { }
1601 
1602     bool do_heap_region(HeapRegion* hr) {
1603       if (_cm->has_aborted()) {
1604         return true;
1605       }
1606 
1607       uint const region_idx = hr->hrm_index();
1608       DEBUG_ONLY(HeapWord* const top_at_rebuild_start_check = _cm->top_at_rebuild_start(region_idx);)
1609       assert(top_at_rebuild_start_check == NULL ||
1610              top_at_rebuild_start_check > hr->bottom(),
1611              "A TARS (" PTR_FORMAT ") == bottom() (" PTR_FORMAT ") indicates the old region %u is empty (%s)",
1612              p2i(top_at_rebuild_start_check), p2i(hr->bottom()),  region_idx, hr->get_type_str());
1613 
1614       size_t total_marked_bytes = 0;
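           // Rebuild the region in chunks so that the worker can yield to (or abort
           // at) safepoints between chunks; see the do_yield_check() below.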
1615       size_t const chunk_size_in_words = G1RebuildRemSetChunkSize / HeapWordSize;
1616 
1617       HeapWord* const top_at_mark_start = hr->prev_top_at_mark_start();
1618 
1619       HeapWord* cur = hr->bottom();
1620       while (cur < hr->end()) {
1621         // After every iteration (yield point) we need to check whether the region's
1622         // TARS changed due to e.g. eager reclaim.
1623         HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);
1624         if (top_at_rebuild_start == NULL) {
1625           return false;
1626         }
1627 
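             // Restrict the chunk to the part of the region below TARS that this
             // iteration should process.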
1628         MemRegion next_chunk = MemRegion(hr->bottom(), top_at_rebuild_start).intersection(MemRegion(cur, chunk_size_in_words));
1629         if (next_chunk.is_empty()) {
1630           break;
1631         }
1632 
1633         const Ticks start = Ticks::now();
1634         size_t marked_bytes = rebuild_rem_set_in_region(_cm->prev_mark_bitmap(),
1635                                                         top_at_mark_start,
1636                                                         top_at_rebuild_start,
1637                                                         hr,
1638                                                         next_chunk);
1639         Tickspan time = Ticks::now() - start;
1640 
1641         log_trace(gc, remset, tracking)("Rebuilt region %u "
1642                                         "live " SIZE_FORMAT " "
1643                                         "time %.3fms "
1644                                         "marked bytes " SIZE_FORMAT " "
1645                                         "bot " PTR_FORMAT " "
1646                                         "TAMS " PTR_FORMAT " "
1647                                         "TARS " PTR_FORMAT,
1648                                         region_idx,
1649                                         _cm->liveness(region_idx) * HeapWordSize,
1650                                         time.seconds() * 1000.0,
1651                                         marked_bytes,
1652                                         p2i(hr->bottom()),
1653                                         p2i(top_at_mark_start),
1654                                         p2i(top_at_rebuild_start));
1655 
1656         if (marked_bytes > 0) {
1657           total_marked_bytes += marked_bytes;
1658         }
1659         cur += chunk_size_in_words;
1660 
1661         _cm->do_yield_check();
1662         if (_cm->has_aborted()) {
1663           return true;
1664         }
1665       }
1666       // In the final iteration of the loop the region might have been eagerly reclaimed.
1667       // Simply filter out those regions. We cannot just use the region type because there
1668       // might already have been new allocations into these regions.
1669       DEBUG_ONLY(HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);)
1670       assert(top_at_rebuild_start == NULL ||
1671              total_marked_bytes == hr->marked_bytes(),
1672              "Marked bytes " SIZE_FORMAT " for region %u (%s) in [bottom, TAMS) do not match calculated marked bytes " SIZE_FORMAT " "
1673              "(" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ")",
1674              total_marked_bytes, hr->hrm_index(), hr->get_type_str(), hr->marked_bytes(),
1675              p2i(hr->bottom()), p2i(top_at_mark_start), p2i(top_at_rebuild_start));
1676       // Abort state may have changed after the yield check.
1677       return _cm->has_aborted();
1678     }
1679   };
1680 
1681   HeapRegionClaimer _hr_claimer;
1682   G1ConcurrentMark* _cm;
1683 
1684   uint _worker_id_offset;
1685 public:
1686   G1RebuildRemSetTask(G1ConcurrentMark* cm,
1687                       uint n_workers,
1688                       uint worker_id_offset) :
1689       AbstractGangTask("G1 Rebuild Remembered Set"),
1690       _hr_claimer(n_workers),
1691       _cm(cm),
1692       _worker_id_offset(worker_id_offset) {
1693   }
1694 
1695   void work(uint worker_id) {
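         // Join the suspendible thread set so that this worker can be suspended at
         // the yield points checked in the region closure.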
1696     SuspendibleThreadSetJoiner sts_join;
1697 
1698     G1CollectedHeap* g1h = G1CollectedHeap::heap();
1699 
1700     G1RebuildRemSetHeapRegionClosure cl(g1h, _cm, _worker_id_offset + worker_id);
1701     g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
1702   }
1703 };
1704 
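     // Runs G1RebuildRemSetTask on the given work gang, offsetting worker ids by
     // worker_id_offset.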
1705 void G1RemSet::rebuild_rem_set(G1ConcurrentMark* cm,
1706                                WorkGang* workers,
1707                                uint worker_id_offset) {
1708   uint num_workers = workers->active_workers();
1709 
1710   G1RebuildRemSetTask cl(cm,
1711                          num_workers,
1712                          worker_id_offset);
1713   workers->run_task(&cl, num_workers);
1714 }