/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1SharedDirtyCardQueue.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.inline.hpp"
#include "gc/g1/sparsePRT.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/ticks.hpp"
// Collects information about the overall heap root scan progress during an evacuation.
//
// Scanning the remembered sets works by first merging all sources of cards to be
// scanned (log buffers, hcc, remembered sets) into a single data structure to remove
// duplicates and simplify work distribution.
//
// During the following card scanning we not only scan this combined set of cards, but
// also remember that these were completely scanned. Subsequent evacuation passes must
// not scan these cards again, so this information needs to be preserved across
// increments.
//
// The representation for all the cards to scan is the card table: cards can have
// one of three states during GC:
// - clean: these cards will not be scanned in this pass
// - dirty: these cards will be scanned in this pass
// - scanned: these cards have already been scanned in a previous pass
//
// After all evacuation is done, we reset the card table to clean.
//
// Work distribution occurs on a "chunk" basis, i.e. contiguous ranges of cards. As an
// additional optimization, during card merging we remember which regions and which
// chunks actually contain cards to be scanned. Threads iterate only across these
// regions, and only compete for chunks containing any cards.
//
// Within these chunks, a worker scans the card table in "blocks" of cards, i.e.
// contiguous ranges of dirty cards to be scanned. These blocks are converted to actual
// memory ranges and then passed on to the actual scanning.
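//
// As a sketch with illustrative numbers (assuming 2048 cards per region and 8 chunks
// per region): each chunk covers 256 consecutive cards. A worker that claims a chunk
// containing any dirty cards walks its part of the card table, finds maximal runs of
// consecutive dirty cards (the "blocks"), converts each run into the corresponding
// heap memory range, and scans the objects overlapping that range.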
class G1RemSetScanState : public CHeapObj<mtGC> {
  class G1DirtyRegions;

  size_t _max_regions;

  // Tracks for each region in the collection set whether it has been processed yet.
  typedef bool G1RemsetIterState;

  G1RemsetIterState volatile* _collection_set_iter_state;

  // Card table iteration claim for each heap region, from 0 (completely unscanned)
  // to (>=) HeapRegion::CardsPerRegion (completely scanned).
  uint volatile* _card_table_scan_state;

  // Returns the "optimal" number of chunks per region to use for claiming areas
  // within a region to scan. Using the region size as a proxy for the heap size, we
  // limit the total number of chunks to bound memory usage and the maintenance
  // effort of that table vs. the granularity of distributing scanning work.
  // Testing showed that 8 chunks for 1M/2M regions, 16 for 4M/8M regions and 32 for
  // 16M/32M regions are a good trade-off.
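  // As a worked example of the formula below: a 1M region (log_region_size == 20)
  // yields 1u << (20 / 2 - 7) == 1u << 3 == 8 chunks, a 4M region (22) yields 16,
  // and a 16M region (24) yields 32, matching the values above.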
  static uint get_chunks_per_region(uint log_region_size) {
    // Limit the expected input values to current known possible values of the
    // (log) region size. Adjust as necessary after testing if changing the permissible
    // values for region size.
    assert(log_region_size >= 20 && log_region_size <= 25,
           "expected value in [20,25], but got %u", log_region_size);
    return 1u << (log_region_size / 2 - 7);
  }

  uint _scan_chunks_per_region;         // Number of chunks per region.
  uint8_t _log_scan_chunks_per_region;  // Log of number of chunks per region.
  bool* _region_scan_chunks;
  size_t _num_total_scan_chunks;        // Total number of elements in _region_scan_chunks.
  uint8_t _scan_chunks_shift;           // For conversion between card index and chunk index.
public:
  uint scan_chunk_size() const { return (uint)1 << _scan_chunks_shift; }

  // Returns whether the chunk corresponding to the given card within the given
  // region contains a dirty card, i.e. actually needs scanning.
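  // For example, assuming 8 chunks per region and 2048 cards per region, the shift
  // is log2(2048 / 8) == 8, so card 1000 in region 5 maps to chunk index
  // (5 << 3) + (1000 >> 8) == 40 + 3 == 43.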
  bool chunk_needs_scan(uint const region_idx, uint const card_in_region) const {
    size_t const idx = ((size_t)region_idx << _log_scan_chunks_per_region) + (card_in_region >> _scan_chunks_shift);
    assert(idx < _num_total_scan_chunks, "Index " SIZE_FORMAT " out of bounds " SIZE_FORMAT,
           idx, _num_total_scan_chunks);
    return _region_scan_chunks[idx];
  }

private:
  // The complete set of regions whose card table needs to be cleared at the end of
  // GC because we scribbled all over them.
  G1DirtyRegions* _all_dirty_regions;
  // The set of regions whose card table needs to be scanned for new dirty cards
  // in the current evacuation pass.
  G1DirtyRegions* _next_dirty_regions;

  // Set of (unique) regions that can be added to concurrently.
  class G1DirtyRegions : public CHeapObj<mtGC> {
    uint* _buffer;
    uint _cur_idx;
    size_t _max_regions;

    bool* _contains;

  public:
    G1DirtyRegions(size_t max_regions) :
      _buffer(NEW_C_HEAP_ARRAY(uint, max_regions, mtGC)),
      _cur_idx(0),
      _max_regions(max_regions),
      _contains(NEW_C_HEAP_ARRAY(bool, max_regions, mtGC)) {

      reset();
    }

    static size_t chunk_size() { return M; }

    ~G1DirtyRegions() {
      FREE_C_HEAP_ARRAY(uint, _buffer);
      FREE_C_HEAP_ARRAY(bool, _contains);
    }

    void reset() {
      _cur_idx = 0;
      ::memset(_contains, false, _max_regions * sizeof(bool));
    }

    uint size() const { return _cur_idx; }

    uint at(uint idx) const {
      assert(idx < _cur_idx, "Index %u beyond valid regions", idx);
      return _buffer[idx];
    }

    void add_dirty_region(uint region) {
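      // Racy first read: if the region is already recorded, skip the more expensive
      // atomic claim below.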
      if (_contains[region]) {
        return;
      }

      bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false;
      if (marked_as_dirty) {
        uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
        _buffer[allocated] = region;
      }
    }

    // Creates the union of this and the other G1DirtyRegions.
    void merge(const G1DirtyRegions* other) {
      for (uint i = 0; i < other->size(); i++) {
        uint region = other->at(i);
        if (!_contains[region]) {
          _buffer[_cur_idx++] = region;
          _contains[region] = true;
        }
      }
    }
  };

  // Creates a snapshot of the current _top values at the start of collection to
  // filter out card marks that we do not want to scan.
  class G1ResetScanTopClosure : public HeapRegionClosure {
    G1RemSetScanState* _scan_state;

  public:
    G1ResetScanTopClosure(G1RemSetScanState* scan_state) : _scan_state(scan_state) { }

    virtual bool do_heap_region(HeapRegion* r) {
      uint hrm_index = r->hrm_index();
      if (r->in_collection_set()) {
        // Young regions had their card table marked as young at their allocation;
        // we need to make sure that these marks are cleared at the end of GC, *but*
        // they should not be scanned for cards.
        // So directly add them to the "all_dirty_regions".
        // Same for regions in the (initial) collection set: they may contain cards from
        // the log buffers, make sure they are cleaned.
        _scan_state->add_all_dirty_region(hrm_index);
      } else if (r->is_old_or_humongous_or_archive()) {
        _scan_state->set_scan_top(hrm_index, r->top());
      }
      return false;
    }
  };
  // For each region, contains the maximum top() value to be used during this garbage
  // collection. Subsumes common checks like filtering out everything but old and
  // humongous regions outside the collection set.
  // This is valid because we are not interested in scanning stray remembered set
  // entries from free or archive regions.
  HeapWord** _scan_top;

  class G1ClearCardTableTask : public AbstractGangTask {
    G1CollectedHeap* _g1h;
    G1DirtyRegions* _regions;
    uint _chunk_length;

    uint volatile _cur_dirty_regions;

    G1RemSetScanState* _scan_state;

  public:
    G1ClearCardTableTask(G1CollectedHeap* g1h,
                         G1DirtyRegions* regions,
                         uint chunk_length,
                         G1RemSetScanState* scan_state) :
      AbstractGangTask("G1 Clear Card Table Task"),
      _g1h(g1h),
      _regions(regions),
      _chunk_length(chunk_length),
      _cur_dirty_regions(0),
      _scan_state(scan_state) {

      assert(chunk_length > 0, "must be");
    }

    static uint chunk_size() { return M; }

    void work(uint worker_id) {
      while (_cur_dirty_regions < _regions->size()) {
        uint next = Atomic::add(&_cur_dirty_regions, _chunk_length) - _chunk_length;
        uint max = MIN2(next + _chunk_length, _regions->size());

        for (uint i = next; i < max; i++) {
          HeapRegion* r = _g1h->region_at(_regions->at(i));
          if (!r->is_survivor()) {
            r->clear_cardtable();
          }
        }
      }
    }
  };

  // Clear the card table of "dirty" regions.
  void clear_card_table(WorkGang* workers) {
    uint num_regions = _all_dirty_regions->size();

    if (num_regions == 0) {
      return;
    }

    uint const num_chunks = (uint)(align_up((size_t)num_regions << HeapRegion::LogCardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size());
    uint const num_workers = MIN2(num_chunks, workers->active_workers());
    uint const chunk_length = G1ClearCardTableTask::chunk_size() / (uint)HeapRegion::CardsPerRegion;
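    // As a worked sketch (assuming 2048 cards per region): each unit of work covers
    // chunk_size() / 2048 == 512 regions' worth of card table, so e.g. 1000 dirty
    // regions yield ceil(1000 * 2048 / M) == 2 units of work shared by the workers.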

    // Iterate over the dirty cards region list.
    G1ClearCardTableTask cl(G1CollectedHeap::heap(), _all_dirty_regions, chunk_length, this);

    log_debug(gc, ergo)("Running %s using %u workers for %u "
                        "units of work for %u regions.",
                        cl.name(), num_workers, num_chunks, num_regions);
    workers->run_task(&cl, num_workers);

#ifndef PRODUCT
    G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
#endif
  }

public:
  G1RemSetScanState() :
    _max_regions(0),
    _collection_set_iter_state(NULL),
    _card_table_scan_state(NULL),
    _scan_chunks_per_region(get_chunks_per_region(HeapRegion::LogOfHRGrainBytes)),
    _log_scan_chunks_per_region(log2_uint(_scan_chunks_per_region)),
    _region_scan_chunks(NULL),
    _num_total_scan_chunks(0),
    _scan_chunks_shift(0),
    _all_dirty_regions(NULL),
    _next_dirty_regions(NULL),
    _scan_top(NULL) {
  }

  ~G1RemSetScanState() {
    FREE_C_HEAP_ARRAY(G1RemsetIterState, _collection_set_iter_state);
    FREE_C_HEAP_ARRAY(uint, _card_table_scan_state);
    FREE_C_HEAP_ARRAY(bool, _region_scan_chunks);
    FREE_C_HEAP_ARRAY(HeapWord*, _scan_top);
  }

  void initialize(size_t max_regions) {
    assert(_collection_set_iter_state == NULL, "Must not be initialized twice");
    _max_regions = max_regions;
    _collection_set_iter_state = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
    _card_table_scan_state = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
    _num_total_scan_chunks = max_regions * _scan_chunks_per_region;
    _region_scan_chunks = NEW_C_HEAP_ARRAY(bool, _num_total_scan_chunks, mtGC);

    _scan_chunks_shift = (uint8_t)log2_intptr(HeapRegion::CardsPerRegion / _scan_chunks_per_region);
    _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC);
  }

  void prepare() {
    for (size_t i = 0; i < _max_regions; i++) {
      _collection_set_iter_state[i] = false;
      clear_scan_top((uint)i);
    }

    _all_dirty_regions = new G1DirtyRegions(_max_regions);
    _next_dirty_regions = new G1DirtyRegions(_max_regions);

    G1ResetScanTopClosure cl(this);
    G1CollectedHeap::heap()->heap_region_iterate(&cl);
  }

  void prepare_for_merge_heap_roots() {
    _all_dirty_regions->merge(_next_dirty_regions);

    _next_dirty_regions->reset();
    for (size_t i = 0; i < _max_regions; i++) {
      _card_table_scan_state[i] = 0;
    }

    ::memset(_region_scan_chunks, false, _num_total_scan_chunks * sizeof(*_region_scan_chunks));
  }

  // Returns whether the given region contains cards we need to scan. The remembered
  // set and other sources may contain cards that
  // - are in uncommitted regions
  // - are located in the collection set
  // - are located in free regions
  // as we do not clean up remembered sets before merging heap roots.
  bool contains_cards_to_process(uint const region_idx) const {
    HeapRegion* hr = G1CollectedHeap::heap()->region_at_or_null(region_idx);
    return (hr != NULL && !hr->in_collection_set() && hr->is_old_or_humongous_or_archive());
  }

  size_t num_visited_cards() const {
    size_t result = 0;
    for (uint i = 0; i < _num_total_scan_chunks; i++) {
      if (_region_scan_chunks[i]) {
        result++;
      }
    }
    return result * (HeapRegion::CardsPerRegion / _scan_chunks_per_region);
  }

  size_t num_cards_in_dirty_regions() const {
    return _next_dirty_regions->size() * HeapRegion::CardsPerRegion;
  }

  void set_chunk_region_dirty(size_t const region_card_idx) {
    size_t chunk_idx = region_card_idx >> _scan_chunks_shift;
    for (uint i = 0; i < _scan_chunks_per_region; i++) {
      _region_scan_chunks[chunk_idx++] = true;
    }
  }

  void set_chunk_dirty(size_t const card_idx) {
    assert((card_idx >> _scan_chunks_shift) < _num_total_scan_chunks,
           "Trying to access index " SIZE_FORMAT " out of bounds " SIZE_FORMAT,
           card_idx >> _scan_chunks_shift, _num_total_scan_chunks);
    size_t const chunk_idx = card_idx >> _scan_chunks_shift;
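    // Write only when the flag is not already set: this chunk table is shared
    // between workers, and skipping redundant stores likely reduces cache line
    // contention (a benign race, as values only ever transition false -> true).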
    if (!_region_scan_chunks[chunk_idx]) {
      _region_scan_chunks[chunk_idx] = true;
    }
  }

  void cleanup(WorkGang* workers) {
    _all_dirty_regions->merge(_next_dirty_regions);

    clear_card_table(workers);

    delete _all_dirty_regions;
    _all_dirty_regions = NULL;

    delete _next_dirty_regions;
    _next_dirty_regions = NULL;
  }

  void iterate_dirty_regions_from(HeapRegionClosure* cl, uint worker_id) {
    uint num_regions = _next_dirty_regions->size();

    if (num_regions == 0) {
      return;
    }

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    WorkGang* workers = g1h->workers();
    uint const max_workers = workers->active_workers();

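    // Spread the workers across the region list in a round-robin fashion: e.g. with
    // 64 dirty regions and 8 workers, worker 3 starts at index 64 * 3 / 8 == 24 and
    // wraps around the end of the list.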
    uint const start_pos = num_regions * worker_id / max_workers;
    uint cur = start_pos;

    do {
      bool result = cl->do_heap_region(g1h->region_at(_next_dirty_regions->at(cur)));
      guarantee(!result, "Not allowed to ask for early termination.");
      cur++;
      if (cur == _next_dirty_regions->size()) {
        cur = 0;
      }
    } while (cur != start_pos);
  }

  // Attempt to claim the given region in the collection set for iteration. Returns true
  // if this call caused the transition from Unclaimed to Claimed.
  inline bool claim_collection_set_region(uint region) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    if (_collection_set_iter_state[region]) {
      return false;
    }
    return !Atomic::cmpxchg(&_collection_set_iter_state[region], false, true);
  }

  bool has_cards_to_scan(uint region) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _card_table_scan_state[region] < HeapRegion::CardsPerRegion;
  }

  uint claim_cards_to_scan(uint region, uint increment) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return Atomic::add(&_card_table_scan_state[region], increment) - increment;
  }

  void add_dirty_region(uint const region) {
#ifdef ASSERT
    HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
    assert(!hr->in_collection_set() && hr->is_old_or_humongous_or_archive(),
           "Region %u is not suitable for scanning, is %sin collection set or %s",
           hr->hrm_index(), hr->in_collection_set() ? "" : "not ", hr->get_short_type_str());
#endif
    _next_dirty_regions->add_dirty_region(region);
  }

  void add_all_dirty_region(uint region) {
#ifdef ASSERT
    HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
    assert(hr->in_collection_set(),
           "Only add young regions to all dirty regions directly but %u is %s",
           hr->hrm_index(), hr->get_short_type_str());
#endif
    _all_dirty_regions->add_dirty_region(region);
  }

  void set_scan_top(uint region_idx, HeapWord* value) {
    _scan_top[region_idx] = value;
  }

  HeapWord* scan_top(uint region_idx) const {
    return _scan_top[region_idx];
  }

  void clear_scan_top(uint region_idx) {
    set_scan_top(region_idx, NULL);
  }
};

G1RemSet::G1RemSet(G1CollectedHeap* g1h,
                   G1CardTable* ct,
                   G1HotCardCache* hot_card_cache) :
  _scan_state(new G1RemSetScanState()),
  _prev_period_summary(false),
  _g1h(g1h),
  _ct(ct),
  _g1p(_g1h->policy()),
  _hot_card_cache(hot_card_cache) {
}

G1RemSet::~G1RemSet() {
  delete _scan_state;
}

uint G1RemSet::num_par_rem_sets() {
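  // Roughly, an upper bound on the number of threads that may concurrently add
  // entries to remembered sets: the dirty card queue par ids, the concurrent
  // refinement threads, and the GC worker threads (concurrent or STW, whichever
  // set is larger).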
  return G1DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
}

void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
}

// Helper class to detect ranges of cards on the card table that need to be scanned.
class G1CardTableScanner : public StackObj {
public:
  typedef CardTable::CardValue CardValue;

private:
  CardValue* const _base_addr;

  CardValue* _cur_addr;
  CardValue* const _end_addr;

  static const size_t ToScanMask = G1CardTable::g1_card_already_scanned;
  static const size_t ExpandedToScanMask = G1CardTable::WordAlreadyScanned;
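  // A card needs scanning iff its "already scanned" bit (the mask above) is clear:
  // dirty cards have it unset, while clean and already-scanned cards have it set.
  // ExpandedToScanMask replicates that bit into every card (byte) of a size_t so
  // that a whole word of cards can be tested at once.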

  bool cur_addr_aligned() const {
    return ((uintptr_t)_cur_addr) % sizeof(size_t) == 0;
  }

  bool cur_card_is_dirty() const {
    CardValue value = *_cur_addr;
    return (value & ToScanMask) == 0;
  }

  bool cur_word_of_cards_contains_any_dirty_card() const {
    assert(cur_addr_aligned(), "Current address should be aligned");
    size_t const value = *(size_t*)_cur_addr;
    return (~value & ExpandedToScanMask) != 0;
  }

  bool cur_word_of_cards_all_dirty_cards() const {
    size_t const value = *(size_t*)_cur_addr;
    return value == G1CardTable::WordAllDirty;
  }

  size_t get_and_advance_pos() {
    _cur_addr++;
    return pointer_delta(_cur_addr, _base_addr, sizeof(CardValue)) - 1;
  }

public:
  G1CardTableScanner(CardValue* start_card, size_t size) :
    _base_addr(start_card),
    _cur_addr(start_card),
    _end_addr(start_card + size) {

    assert(is_aligned(start_card, sizeof(size_t)), "Unaligned start addr " PTR_FORMAT, p2i(start_card));
    assert(is_aligned(size, sizeof(size_t)), "Unaligned size " SIZE_FORMAT, size);
  }

  size_t find_next_dirty() {
    while (!cur_addr_aligned()) {
      if (cur_card_is_dirty()) {
        return get_and_advance_pos();
      }
      _cur_addr++;
    }

    assert(cur_addr_aligned(), "Current address should be aligned now.");
    while (_cur_addr != _end_addr) {
      if (cur_word_of_cards_contains_any_dirty_card()) {
        for (size_t i = 0; i < sizeof(size_t); i++) {
          if (cur_card_is_dirty()) {
            return get_and_advance_pos();
          }
          _cur_addr++;
        }
        assert(false, "Should not reach here given we detected a dirty card in the word.");
      }
      _cur_addr += sizeof(size_t);
    }
    return get_and_advance_pos();
  }

  size_t find_next_non_dirty() {
    assert(_cur_addr <= _end_addr, "Not allowed to search for marks after area.");

    while (!cur_addr_aligned()) {
      if (!cur_card_is_dirty()) {
        return get_and_advance_pos();
      }
      _cur_addr++;
    }

    assert(cur_addr_aligned(), "Current address should be aligned now.");
    while (_cur_addr != _end_addr) {
      if (!cur_word_of_cards_all_dirty_cards()) {
        for (size_t i = 0; i < sizeof(size_t); i++) {
          if (!cur_card_is_dirty()) {
            return get_and_advance_pos();
          }
          _cur_addr++;
        }
        assert(false, "Should not reach here given we detected a non-dirty card in the word.");
      }
      _cur_addr += sizeof(size_t);
    }
    return get_and_advance_pos();
  }
};

// Helper class to claim dirty chunks within the card table.
class G1CardTableChunkClaimer {
  G1RemSetScanState* _scan_state;
  uint _region_idx;
  uint _cur_claim;

public:
  G1CardTableChunkClaimer(G1RemSetScanState* scan_state, uint region_idx) :
    _scan_state(scan_state),
    _region_idx(region_idx),
    _cur_claim(0) {
    guarantee(size() <= HeapRegion::CardsPerRegion, "Should not claim more space than possible.");
  }

  bool has_next() {
    while (true) {
      _cur_claim = _scan_state->claim_cards_to_scan(_region_idx, size());
      if (_cur_claim >= HeapRegion::CardsPerRegion) {
        return false;
      }
      if (_scan_state->chunk_needs_scan(_region_idx, _cur_claim)) {
        return true;
      }
    }
  }

  uint value() const { return _cur_claim; }
  uint size() const { return _scan_state->scan_chunk_size(); }
};

// Scans a heap region for dirty cards.
class G1ScanHRForRegionClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  G1CardTable* _ct;
  G1BlockOffsetTable* _bot;

  G1ParScanThreadState* _pss;

  G1RemSetScanState* _scan_state;

  G1GCPhaseTimes::GCParPhases _phase;

  uint   _worker_id;

  size_t _cards_scanned;
  size_t _blocks_scanned;
  size_t _chunks_claimed;

  Tickspan _rem_set_root_scan_time;
  Tickspan _rem_set_trim_partially_time;

  // The address up to which this thread has already scanned (walked the heap) during
  // card scanning (exclusive).
  HeapWord* _scanned_to;

  HeapWord* scan_memregion(uint region_idx_for_card, MemRegion mr) {
    HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
    G1ScanCardClosure card_cl(_g1h, _pss);

    HeapWord* const scanned_to = card_region->oops_on_memregion_seq_iterate_careful<true>(mr, &card_cl);
    assert(scanned_to != NULL, "Should be able to scan range");
    assert(scanned_to >= mr.end(), "Scanned to " PTR_FORMAT " less than range " PTR_FORMAT, p2i(scanned_to), p2i(mr.end()));

    _pss->trim_queue_partially();
    return scanned_to;
  }

  void do_claimed_block(uint const region_idx_for_card, size_t const first_card, size_t const num_cards) {
    HeapWord* const card_start = _bot->address_for_index_raw(first_card);
#ifdef ASSERT
    HeapRegion* hr = _g1h->region_at_or_null(region_idx_for_card);
    assert(hr == NULL || hr->is_in_reserved(card_start),
           "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index());
#endif
    HeapWord* const top = _scan_state->scan_top(region_idx_for_card);
    if (card_start >= top) {
      return;
    }

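    // Convert the claimed card range into a heap word range; BOTConstants::LogN_words
    // is the log of the card size in words (e.g. a 512-byte card is 64 words on
    // 64-bit, giving a shift by 6).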
    HeapWord* scan_end = MIN2(card_start + (num_cards << BOTConstants::LogN_words), top);
    if (_scanned_to >= scan_end) {
      return;
    }
    MemRegion mr(MAX2(card_start, _scanned_to), scan_end);
    _scanned_to = scan_memregion(region_idx_for_card, mr);

    _cards_scanned += num_cards;
  }

  ALWAYSINLINE void do_card_block(uint const region_idx, size_t const first_card, size_t const num_cards) {
    _ct->mark_as_scanned(first_card, num_cards);
    do_claimed_block(region_idx, first_card, num_cards);
    _blocks_scanned++;
  }

  void scan_heap_roots(HeapRegion* r) {
    EventGCPhaseParallel event;
    uint const region_idx = r->hrm_index();

    ResourceMark rm;

    G1CardTableChunkClaimer claim(_scan_state, region_idx);

    // Set the current scan "finger" to NULL for every heap region to scan. Since
    // the claim value is monotonically increasing, the check to not scan below this
    // value also filters out objects spanning chunks within the region, without
    // having to reset this value for every claim.
    _scanned_to = NULL;

    while (claim.has_next()) {
      size_t const region_card_base_idx = ((size_t)region_idx << HeapRegion::LogCardsPerRegion) + claim.value();
      CardTable::CardValue* const base_addr = _ct->byte_for_index(region_card_base_idx);

      G1CardTableScanner scan(base_addr, claim.size());

      size_t first_scan_idx = scan.find_next_dirty();
      while (first_scan_idx != claim.size()) {
        assert(*_ct->byte_for_index(region_card_base_idx + first_scan_idx) <= 0x1, "is %d at region %u idx " SIZE_FORMAT, *_ct->byte_for_index(region_card_base_idx + first_scan_idx), region_idx, first_scan_idx);

        size_t const last_scan_idx = scan.find_next_non_dirty();
        size_t const len = last_scan_idx - first_scan_idx;

        do_card_block(region_idx, region_card_base_idx + first_scan_idx, len);

        if (last_scan_idx == claim.size()) {
          break;
        }

        first_scan_idx = scan.find_next_dirty();
      }
      _chunks_claimed++;
    }

    event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ScanHR));
  }

public:
  G1ScanHRForRegionClosure(G1RemSetScanState* scan_state,
                           G1ParScanThreadState* pss,
                           uint worker_id,
                           G1GCPhaseTimes::GCParPhases phase) :
    _g1h(G1CollectedHeap::heap()),
    _ct(_g1h->card_table()),
    _bot(_g1h->bot()),
    _pss(pss),
    _scan_state(scan_state),
    _phase(phase),
    _worker_id(worker_id),
    _cards_scanned(0),
    _blocks_scanned(0),
    _chunks_claimed(0),
    _rem_set_root_scan_time(),
    _rem_set_trim_partially_time(),
    _scanned_to(NULL) {
  }

  bool do_heap_region(HeapRegion* r) {
    assert(!r->in_collection_set() && r->is_old_or_humongous_or_archive(),
           "Should only be called on old gen non-collection set regions but region %u is not.",
           r->hrm_index());
    uint const region_idx = r->hrm_index();

    if (_scan_state->has_cards_to_scan(region_idx)) {
      G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time);
      scan_heap_roots(r);
    }
    return false;
  }

  Tickspan rem_set_root_scan_time() const { return _rem_set_root_scan_time; }
  Tickspan rem_set_trim_partially_time() const { return _rem_set_trim_partially_time; }

  size_t cards_scanned() const { return _cards_scanned; }
  size_t blocks_scanned() const { return _blocks_scanned; }
  size_t chunks_claimed() const { return _chunks_claimed; }
};

void G1RemSet::scan_heap_roots(G1ParScanThreadState* pss,
                               uint worker_id,
                               G1GCPhaseTimes::GCParPhases scan_phase,
                               G1GCPhaseTimes::GCParPhases objcopy_phase) {
  G1ScanHRForRegionClosure cl(_scan_state, pss, worker_id, scan_phase);
  _scan_state->iterate_dirty_regions_from(&cl, worker_id);

  G1GCPhaseTimes* p = _g1p->phase_times();

  p->record_or_add_time_secs(objcopy_phase, worker_id, cl.rem_set_trim_partially_time().seconds());

  p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_root_scan_time().seconds());
  p->record_or_add_thread_work_item(scan_phase, worker_id, cl.cards_scanned(), G1GCPhaseTimes::ScanHRScannedCards);
  p->record_or_add_thread_work_item(scan_phase, worker_id, cl.blocks_scanned(), G1GCPhaseTimes::ScanHRScannedBlocks);
  p->record_or_add_thread_work_item(scan_phase, worker_id, cl.chunks_claimed(), G1GCPhaseTimes::ScanHRClaimedChunks);
}

// Heap region closure to be applied to all regions in the current collection set
// increment to fix up non-card related roots.
class G1ScanCollectionSetRegionClosure : public HeapRegionClosure {
  G1ParScanThreadState* _pss;
  G1RemSetScanState* _scan_state;

  G1GCPhaseTimes::GCParPhases _scan_phase;
  G1GCPhaseTimes::GCParPhases _code_roots_phase;

  uint _worker_id;

  size_t _opt_refs_scanned;
  size_t _opt_refs_memory_used;

  Tickspan _strong_code_root_scan_time;
  Tickspan _strong_code_trim_partially_time;

  Tickspan _rem_set_opt_root_scan_time;
  Tickspan _rem_set_opt_trim_partially_time;

  void scan_opt_rem_set_roots(HeapRegion* r) {
    EventGCPhaseParallel event;

    G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r);

    G1ScanCardClosure scan_cl(G1CollectedHeap::heap(), _pss);
    G1ScanRSForOptionalClosure cl(G1CollectedHeap::heap(), &scan_cl);
    _opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->strong_oops());
    _opt_refs_memory_used += opt_rem_set_list->used_memory();

    event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_scan_phase));
  }

public:
  G1ScanCollectionSetRegionClosure(G1RemSetScanState* scan_state,
                                   G1ParScanThreadState* pss,
                                   uint worker_id,
                                   G1GCPhaseTimes::GCParPhases scan_phase,
                                   G1GCPhaseTimes::GCParPhases code_roots_phase) :
    _pss(pss),
    _scan_state(scan_state),
    _scan_phase(scan_phase),
    _code_roots_phase(code_roots_phase),
    _worker_id(worker_id),
    _opt_refs_scanned(0),
    _opt_refs_memory_used(0),
    _strong_code_root_scan_time(),
    _strong_code_trim_partially_time(),
    _rem_set_opt_root_scan_time(),
    _rem_set_opt_trim_partially_time() { }

  bool do_heap_region(HeapRegion* r) {
    uint const region_idx = r->hrm_index();

    // The individual references for the optional remembered set are per-worker, so we
    // always need to scan them.
    if (r->has_index_in_opt_cset()) {
      G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_opt_root_scan_time, _rem_set_opt_trim_partially_time);
      scan_opt_rem_set_roots(r);
    }

    if (_scan_state->claim_collection_set_region(region_idx)) {
      EventGCPhaseParallel event;

      G1EvacPhaseWithTrimTimeTracker timer(_pss, _strong_code_root_scan_time, _strong_code_trim_partially_time);
      // Scan the strong code root list attached to the current region
      r->strong_code_roots_do(_pss->closures()->weak_codeblobs());

      event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_code_roots_phase));
    }

    return false;
  }

  Tickspan strong_code_root_scan_time() const { return _strong_code_root_scan_time; }
  Tickspan strong_code_root_trim_partially_time() const { return _strong_code_trim_partially_time; }

  Tickspan rem_set_opt_root_scan_time() const { return _rem_set_opt_root_scan_time; }
  Tickspan rem_set_opt_trim_partially_time() const { return _rem_set_opt_trim_partially_time; }

  size_t opt_refs_scanned() const { return _opt_refs_scanned; }
  size_t opt_refs_memory_used() const { return _opt_refs_memory_used; }
};

void G1RemSet::scan_collection_set_regions(G1ParScanThreadState* pss,
                                           uint worker_id,
                                           G1GCPhaseTimes::GCParPhases scan_phase,
                                           G1GCPhaseTimes::GCParPhases coderoots_phase,
                                           G1GCPhaseTimes::GCParPhases objcopy_phase) {
  G1ScanCollectionSetRegionClosure cl(_scan_state, pss, worker_id, scan_phase, coderoots_phase);
  _g1h->collection_set_iterate_increment_from(&cl, worker_id);

  G1GCPhaseTimes* p = _g1h->phase_times();

  p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_opt_root_scan_time().seconds());
  p->record_or_add_time_secs(scan_phase, worker_id, cl.rem_set_opt_trim_partially_time().seconds());

  p->record_or_add_time_secs(coderoots_phase, worker_id, cl.strong_code_root_scan_time().seconds());
  p->add_time_secs(objcopy_phase, worker_id, cl.strong_code_root_trim_partially_time().seconds());

  // At this time we record some metrics only for the evacuations after the initial one.
  if (scan_phase == G1GCPhaseTimes::OptScanHR) {
    p->record_or_add_thread_work_item(scan_phase, worker_id, cl.opt_refs_scanned(), G1GCPhaseTimes::ScanHRScannedOptRefs);
    p->record_or_add_thread_work_item(scan_phase, worker_id, cl.opt_refs_memory_used(), G1GCPhaseTimes::ScanHRUsedMemory);
  }
}

void G1RemSet::prepare_for_scan_heap_roots() {
  G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
  dcqs.concatenate_logs();

  _scan_state->prepare();
}

class G1MergeHeapRootsTask : public AbstractGangTask {

  // Visitor for remembered sets, dropping entries onto the card table.
  class G1MergeCardSetClosure : public HeapRegionClosure {
    G1RemSetScanState* _scan_state;
    G1CardTable* _ct;

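    // Number of remembered set containers merged, by granularity: sparse (short
    // arrays of card indexes), fine (per-region card bitmaps) and coarse (regions
    // whose entire card range is treated as dirty).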
    uint _merged_sparse;
    uint _merged_fine;
    uint _merged_coarse;

    size_t _cards_dirty;

    // Returns whether the region contains cards we need to scan. If so, remembers
    // that region in the current set of dirty regions.
    bool remember_if_interesting(uint const region_idx) {
      if (!_scan_state->contains_cards_to_process(region_idx)) {
        return false;
      }
      _scan_state->add_dirty_region(region_idx);
      return true;
    }
  public:
    G1MergeCardSetClosure(G1RemSetScanState* scan_state) :
      _scan_state(scan_state),
      _ct(G1CollectedHeap::heap()->card_table()),
      _merged_sparse(0),
      _merged_fine(0),
      _merged_coarse(0),
      _cards_dirty(0) { }

    void next_coarse_prt(uint const region_idx) {
      if (!remember_if_interesting(region_idx)) {
        return;
      }

      _merged_coarse++;

      size_t region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
      _cards_dirty += _ct->mark_region_dirty(region_base_idx, HeapRegion::CardsPerRegion);
      _scan_state->set_chunk_region_dirty(region_base_idx);
    }

    void next_fine_prt(uint const region_idx, BitMap* bm) {
      if (!remember_if_interesting(region_idx)) {
        return;
      }

      _merged_fine++;

      size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
      BitMap::idx_t cur = bm->get_next_one_offset(0);
      while (cur != bm->size()) {
        _cards_dirty += _ct->mark_clean_as_dirty(region_base_idx + cur);
        _scan_state->set_chunk_dirty(region_base_idx + cur);
        cur = bm->get_next_one_offset(cur + 1);
      }
    }

    void next_sparse_prt(uint const region_idx, SparsePRTEntry::card_elem_t* cards, uint const num_cards) {
      if (!remember_if_interesting(region_idx)) {
        return;
      }

      _merged_sparse++;

      size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
      for (uint i = 0; i < num_cards; i++) {
        size_t card_idx = region_base_idx + cards[i];
        _cards_dirty += _ct->mark_clean_as_dirty(card_idx);
        _scan_state->set_chunk_dirty(card_idx);
      }
    }

    virtual bool do_heap_region(HeapRegion* r) {
      assert(r->in_collection_set() || r->is_starts_humongous(), "must be");

      HeapRegionRemSet* rem_set = r->rem_set();
      if (!rem_set->is_empty()) {
        rem_set->iterate_prts(*this);
      }

      return false;
    }

    size_t merged_sparse() const { return _merged_sparse; }
    size_t merged_fine() const { return _merged_fine; }
    size_t merged_coarse() const { return _merged_coarse; }

    size_t cards_dirty() const { return _cards_dirty; }
  };

  // Visitor for the remembered sets of humongous candidate regions to merge their
  // remembered set into the card table.
  class G1FlushHumongousCandidateRemSets : public HeapRegionClosure {
    G1MergeCardSetClosure _cl;

  public:
    G1FlushHumongousCandidateRemSets(G1RemSetScanState* scan_state) : _cl(scan_state) { }

    virtual bool do_heap_region(HeapRegion* r) {
      G1CollectedHeap* g1h = G1CollectedHeap::heap();

      if (!r->is_starts_humongous() ||
          !g1h->region_attr(r->hrm_index()).is_humongous() ||
          r->rem_set()->is_empty()) {
        return false;
      }

      guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
                "Found a not-small remembered set here. This is inconsistent with previous assumptions.");

      _cl.do_heap_region(r);

      // We should only clear the card based remembered set here as we will not
      // implicitly rebuild anything else during eager reclaim. Note that at the moment
      // (and probably never) we do not enter this path if there are other kinds of
      // remembered sets for this region.
      r->rem_set()->clear_locked(true /* only_cardset */);
      // clear_locked() above sets the state to Empty. However we want to continue
      // collecting remembered set entries for humongous regions that were not
      // reclaimed.
      r->rem_set()->set_state_complete();
#ifdef ASSERT
      G1HeapRegionAttr region_attr = g1h->region_attr(r->hrm_index());
      assert(region_attr.needs_remset_update(), "must be");
#endif
      assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");

      return false;
    }

    size_t merged_sparse() const { return _cl.merged_sparse(); }
    size_t merged_fine() const { return _cl.merged_fine(); }
    size_t merged_coarse() const { return _cl.merged_coarse(); }

    size_t cards_dirty() const { return _cl.cards_dirty(); }
  };

  // Visitor for the log buffer entries to merge them into the card table.
  class G1MergeLogBufferCardsClosure : public G1CardTableEntryClosure {
    G1RemSetScanState* _scan_state;
    G1CardTable* _ct;

    size_t _cards_dirty;
    size_t _cards_skipped;
  public:
    G1MergeLogBufferCardsClosure(G1CollectedHeap* g1h, G1RemSetScanState* scan_state) :
      _scan_state(scan_state), _ct(g1h->card_table()), _cards_dirty(0), _cards_skipped(0)
    {}

    void do_card_ptr(CardValue* card_ptr, uint worker_id) {
      // The only time we care about recording cards that
      // contain references that point into the collection set
      // is during RSet updating within an evacuation pause.
      // In this case worker_id should be the id of a GC worker thread.
      assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");

      uint const region_idx = _ct->region_idx_for(card_ptr);

      // The second clause must come after the first: the log buffers might contain
      // cards for uncommitted regions.
      // This code may count duplicate entries in the log buffers (even if rare)
      // multiple times.
      if (_scan_state->contains_cards_to_process(region_idx) && (*card_ptr == G1CardTable::dirty_card_val())) {
        _scan_state->add_dirty_region(region_idx);
        _scan_state->set_chunk_dirty(_ct->index_for_cardvalue(card_ptr));
        _cards_dirty++;
      } else {
        // We may have had dirty cards in the (initial) collection set (or the
        // young regions which are always in the initial collection set). We do
        // not fix their cards here: we already added these regions to the set of
        // regions to clear the card table at the end during the prepare() phase.
        _cards_skipped++;
      }
    }

    size_t cards_dirty() const { return _cards_dirty; }
    size_t cards_skipped() const { return _cards_skipped; }
  };

  HeapRegionClaimer _hr_claimer;
  G1RemSetScanState* _scan_state;
  BufferNode::Stack _dirty_card_buffers;
  bool _initial_evacuation;

  volatile bool _fast_reclaim_handled;

  void apply_closure_to_dirty_card_buffers(G1MergeLogBufferCardsClosure* cl, uint worker_id) {
    G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
    size_t buffer_size = dcqs.buffer_size();
    while (BufferNode* node = _dirty_card_buffers.pop()) {
      cl->apply_to_buffer(node, buffer_size, worker_id);
      dcqs.deallocate_buffer(node);
    }
  }

public:
  G1MergeHeapRootsTask(G1RemSetScanState* scan_state, uint num_workers, bool initial_evacuation) :
    AbstractGangTask("G1 Merge Heap Roots"),
    _hr_claimer(num_workers),
    _scan_state(scan_state),
    _dirty_card_buffers(),
    _initial_evacuation(initial_evacuation),
    _fast_reclaim_handled(false)
  {
    if (initial_evacuation) {
      G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
      G1BufferNodeList buffers = dcqs.take_all_completed_buffers();
      if (buffers._entry_count != 0) {
        _dirty_card_buffers.prepend(*buffers._head, *buffers._tail);
      }
    }
  }

  virtual void work(uint worker_id) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    G1GCPhaseTimes* p = g1h->phase_times();

    G1GCPhaseTimes::GCParPhases merge_remset_phase = _initial_evacuation ?
                                                     G1GCPhaseTimes::MergeRS :
                                                     G1GCPhaseTimes::OptMergeRS;

    // We schedule flushing the remembered sets of humongous fast reclaim candidates
    // onto the card table first to allow the remaining parallelized tasks to hide it.
1145     if (_initial_evacuation &&
1146         p->fast_reclaim_humongous_candidates() > 0 &&
1147         !_fast_reclaim_handled &&
1148         !Atomic::cmpxchg(&_fast_reclaim_handled, false, true)) {
1149 
1150       G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeER, worker_id);
1151 
1152       G1FlushHumongousCandidateRemSets cl(_scan_state);
1153       g1h->heap_region_iterate(&cl);
1154 
1155       p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
1156       p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
1157       p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
1158       p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeRSDirtyCards);
1159     }
1160 
1161     // Merge remembered sets of current candidates.
1162     {
1163       G1GCParPhaseTimesTracker x(p, merge_remset_phase, worker_id, _initial_evacuation /* must_record */);
1164       G1MergeCardSetClosure cl(_scan_state);
1165       g1h->collection_set_iterate_increment_from(&cl, &_hr_claimer, worker_id);
1166 
1167       p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
1168       p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
1169       p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
1170       p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeRSDirtyCards);
1171     }
1172 
1173     // Apply closure to log entries in the HCC.
1174     if (_initial_evacuation && G1HotCardCache::default_use_cache()) {
1175       assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
1176       G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeHCC, worker_id);
1177       G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
1178       g1h->iterate_hcc_closure(&cl, worker_id);
1179 
1180       p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeHCCDirtyCards);
1181       p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeHCCSkippedCards);
1182     }
1183 
1184     // Now apply the closure to all remaining log entries.
1185     if (_initial_evacuation) {
1186       assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
1187       G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeLB, worker_id);
1188 
1189       G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
1190       apply_closure_to_dirty_card_buffers(&cl, worker_id);
1191 
1192       p->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeLBDirtyCards);
1193       p->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeLBSkippedCards);
1194     }
1195   }
1196 };
1197 
1198 void G1RemSet::print_merge_heap_roots_stats() {
1199   size_t num_visited_cards = _scan_state->num_visited_cards();
1200 
1201   size_t total_dirty_region_cards = _scan_state->num_cards_in_dirty_regions();
1202 
1203   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1204   size_t total_old_region_cards =
1205     (g1h->num_regions() - (g1h->num_free_regions() - g1h->collection_set()->cur_length())) * HeapRegion::CardsPerRegion;
1206 
1207   log_debug(gc,remset)("Visited cards " SIZE_FORMAT " Total dirty " SIZE_FORMAT " (%.2lf%%) Total old " SIZE_FORMAT " (%.2lf%%)",
1208                        num_visited_cards,
1209                        total_dirty_region_cards,
1210                        percent_of(num_visited_cards, total_dirty_region_cards),
1211                        total_old_region_cards,
1212                        percent_of(num_visited_cards, total_old_region_cards));
1213 }
1214 
1215 void G1RemSet::merge_heap_roots(bool initial_evacuation) {
1216   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1217 
1218   {
1219     Ticks start = Ticks::now();
1220 
1221     _scan_state->prepare_for_merge_heap_roots();
1222 
1223     Tickspan total = Ticks::now() - start;
1224     if (initial_evacuation) {
1225       g1h->phase_times()->record_prepare_merge_heap_roots_time(total.seconds() * 1000.0);
1226     } else {
1227       g1h->phase_times()->record_or_add_optional_prepare_merge_heap_roots_time(total.seconds() * 1000.0);
1228     }
1229   }
1230 
1231   WorkGang* workers = g1h->workers();
1232   size_t const increment_length = g1h->collection_set()->increment_length();
1233 
1234   uint const num_workers = initial_evacuation ? workers->active_workers() :
1235                                                 MIN2(workers->active_workers(), (uint)increment_length);
1236 
1237   {
1238     G1MergeHeapRootsTask cl(_scan_state, num_workers, initial_evacuation);
1239     log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " regions",
1240                         cl.name(), num_workers, increment_length);
1241     workers->run_task(&cl, num_workers);
1242   }
1243 
1244   if (log_is_enabled(Debug, gc, remset)) {
1245     print_merge_heap_roots_stats();
1246   }
1247 }
1248 
1249 void G1RemSet::prepare_for_scan_heap_roots(uint region_idx) {
1250   _scan_state->clear_scan_top(region_idx);
1251 }
1252 
1253 void G1RemSet::cleanup_after_scan_heap_roots() {
1254   G1GCPhaseTimes* phase_times = _g1h->phase_times();
1255 
1256   // Set all cards back to clean.
1257   double start = os::elapsedTime();
1258   _scan_state->cleanup(_g1h->workers());
1259   phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
1260 }
1261 
1262 inline void check_card_ptr(CardTable::CardValue* card_ptr, G1CardTable* ct) {
1263 #ifdef ASSERT
1264   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1265   assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
1266          "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
1267          p2i(card_ptr),
1268          ct->index_for(ct->addr_for(card_ptr)),
1269          p2i(ct->addr_for(card_ptr)),
1270          g1h->addr_to_region(ct->addr_for(card_ptr)));
1271 #endif
1272 }
1273 
bool G1RemSet::clean_card_before_refine(CardValue** const card_ptr_addr) {
  assert(!_g1h->is_gc_active(), "Only call concurrently");

  CardValue* card_ptr = *card_ptr_addr;
  // Find the start address represented by the card.
  HeapWord* start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1h->heap_region_containing_or_null(start);

  // If this is a (stale) card into an uncommitted region, exit.
  if (r == NULL) {
    return false;
  }

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do.
  // We cannot load the card value before the "r == NULL" check, because G1
  // could uncommit parts of the card table covering uncommitted regions.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return false;
  }

  // This check is needed for some uncommon cases where we should
  // ignore the card.
  //
  // The region could be young.  Cards for young regions are
  // distinctly marked (set to g1_young_gen), so the post-barrier will
  // filter them out.  However, that marking is performed
  // concurrently.  A write to a young object could occur before the
  // card has been marked young, slipping past the filter.
  //
  // The card could be stale, because the region has been freed since
  // the card was recorded. In this case the region type could be
  // anything.  If (still) free or (reallocated) young, just ignore
  // it.  If (reallocated) old or humongous, the later card trimming
  // and additional checks in iteration may detect staleness.  At
  // worst, we end up processing a stale card unnecessarily.
  //
  // In the normal (non-stale) case, the synchronization between the
  // enqueueing of the card and processing it here will have ensured
  // we see the up-to-date region type here.
  if (!r->is_old_or_humongous_or_archive()) {
    return false;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
  //

  if (_hot_card_cache->use_cache()) {
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    const CardValue* orig_card_ptr = card_ptr;
    card_ptr = _hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return false;
    } else if (card_ptr != orig_card_ptr) {
      // Original card was inserted and an old card was evicted.
      start = _ct->addr_for(card_ptr);
      r = _g1h->heap_region_containing(start);

      // Check whether the region formerly in the cache should be
      // ignored, as discussed earlier for the original card.  The
      // region could have been freed while in the cache.
      if (!r->is_old_or_humongous_or_archive()) {
        return false;
      }
      *card_ptr_addr = card_ptr;
    } // Else we still have the original card.
  }

  // Trim the region designated by the card to what's been allocated
  // in the region.  The card could be stale, or the card could cover
  // (part of) an object at the end of the allocated space and extend
  // beyond the end of allocation.

  // Non-humongous objects are either allocated in the old regions during GC,
  // or mapped in archive regions during startup. So if the region is old or
  // archive, then top is stable.
  // Humongous object allocation sets top last; if top has not yet been set,
  // this is a stale card and we'll end up with an empty intersection.
  // If this is not a stale card, the synchronization between the
  // enqueueing of the card and processing it here will have ensured
  // we see the up-to-date top here.
  HeapWord* scan_limit = r->top();

  if (scan_limit <= start) {
    // If the trimmed region is empty, the card must be stale.
    return false;
  }

  // Okay to clean and process the card now.  There are still some
  // stale card cases that may be detected by iteration and dealt with
  // as iteration failure.
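  // The cast to volatile ensures the store to the card cannot be elided by
  // the compiler.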
  *const_cast<volatile CardValue*>(card_ptr) = G1CardTable::clean_card_val();

  return true;
}

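// Refines the heap area corresponding to the given (previously cleaned)
// card by applying the concurrent refinement closure to all references in
// it. If part of that area is unparsable, re-dirties the card and
// re-enqueues it on the shared queue for later re-processing.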
void G1RemSet::refine_card_concurrently(CardValue* const card_ptr,
                                        const uint worker_id) {
  assert(!_g1h->is_gc_active(), "Only call concurrently");
  check_card_ptr(card_ptr, _ct);

  // Construct the MemRegion representing the card.
  HeapWord* start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1h->heap_region_containing(start);
  // This reload of the top is safe even though it happens after the full
  // fence, because top is stable for old, archive and unfiltered humongous
  // regions, so it must return the same value as the previous load when
  // cleaning the card. Also, cleaning the card and refining it cannot span
  // a safepoint, so we do not need to worry about top changing during a
  // safepoint.
  HeapWord* scan_limit = r->top();
  assert(scan_limit > start, "sanity");

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* end = start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(start, MIN2(scan_limit, end));
  assert(!dirty_region.is_empty(), "sanity");

  G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_id);
  if (r->oops_on_memregion_seq_iterate_careful<false>(dirty_region, &conc_refine_cl) != NULL) {
    return;
  }

  // If unable to process the card then we encountered an unparsable
  // part of the heap (e.g. a partially allocated object, so only
  // temporarily a problem) while processing a stale card.  Despite
  // the card being stale, we can't simply ignore it, because we've
  // already marked the card cleaned, and so have taken responsibility
  // for ensuring the card gets scanned.
  //
  // However, the card might have gotten re-dirtied and re-enqueued
  // while we worked.  (In fact, it's pretty likely.)
  if (*card_ptr == G1CardTable::dirty_card_val()) {
    return;
  }

  // Re-dirty the card and enqueue it in the *shared* queue.  Can't use
  // the thread-local queue, because that might be the queue that is
  // being processed by us; we could be a Java thread conscripted to
  // perform refinement on our queue's current buffer.
  *card_ptr = G1CardTable::dirty_card_val();
  G1BarrierSet::shared_dirty_card_queue().enqueue(card_ptr);
}

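// Prints a summary of remembered set activity since the previous periodic
// summary, gated on G1SummarizeRSetStatsPeriod and trace-level remset
// logging.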
void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
  if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
      (period_count % G1SummarizeRSetStatsPeriod == 0)) {

    G1RemSetSummary current;
    _prev_period_summary.subtract_from(&current);

    Log(gc, remset) log;
    log.trace("%s", header);
    ResourceMark rm;
    LogStream ls(log.trace());
    _prev_period_summary.print_on(&ls);

    _prev_period_summary.set(&current);
  }
}

void G1RemSet::print_summary_info() {
  Log(gc, remset, exit) log;
  if (log.is_trace()) {
    log.trace(" Cumulative RS summary");
    G1RemSetSummary current;
    ResourceMark rm;
    LogStream ls(log.trace());
    current.print_on(&ls);
  }
}

class G1RebuildRemSetTask: public AbstractGangTask {
  // Rebuilds the remembered sets of the regions, using the liveness
  // information that was constructed concurrently with marking.
  class G1RebuildRemSetHeapRegionClosure : public HeapRegionClosure {
    G1ConcurrentMark* _cm;
    G1RebuildRemSetClosure _update_cl;

    // Applies _update_cl to the references of the given object, limiting objArrays
    // to the given MemRegion. Returns the number of words actually scanned.
    size_t scan_for_references(oop const obj, MemRegion mr) {
      size_t const obj_size = obj->size();
      // All non-objArrays and objArrays completely within the mr
      // can be scanned without passing the mr.
      if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) {
        obj->oop_iterate(&_update_cl);
        return obj_size;
      }
      // This path is for objArrays crossing the given MemRegion. Only scan the
      // area within the MemRegion.
      obj->oop_iterate(&_update_cl, mr);
      return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size();
    }

    // A humongous object is live (with respect to the scanning) if either
    // a) it is marked on the bitmap as such, or
    // b) its TARS is larger than its TAMS, i.e. it has been allocated during marking.
    bool is_humongous_live(oop const humongous_obj, const G1CMBitMap* const bitmap, HeapWord* tams, HeapWord* tars) const {
      return bitmap->is_marked(humongous_obj) || (tars > tams);
    }

    // Iterator over the live objects within the given MemRegion.
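    //
    // Intended use, mirroring the loop in rebuild_rem_set_in_region() below:
    //
    //   for (LiveObjIterator it(bitmap, tams, mr, first_obj); it.has_next(); it.move_to_next()) {
    //     oop obj = it.next();
    //     // ... scan obj ...
    //   }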
    class LiveObjIterator : public StackObj {
      const G1CMBitMap* const _bitmap;
      const HeapWord* _tams;
      const MemRegion _mr;
      HeapWord* _current;

      bool is_below_tams() const {
        return _current < _tams;
      }

      bool is_live(HeapWord* obj) const {
        return !is_below_tams() || _bitmap->is_marked(obj);
      }

      HeapWord* bitmap_limit() const {
        return MIN2(const_cast<HeapWord*>(_tams), _mr.end());
      }

      void move_if_below_tams() {
        if (is_below_tams() && has_next()) {
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
        }
      }
    public:
      LiveObjIterator(const G1CMBitMap* const bitmap, const HeapWord* tams, const MemRegion mr, HeapWord* first_oop_into_mr) :
          _bitmap(bitmap),
          _tams(tams),
          _mr(mr),
          _current(first_oop_into_mr) {

        assert(_current <= _mr.start(),
               "First oop " PTR_FORMAT " should extend into mr [" PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(first_oop_into_mr), p2i(mr.start()), p2i(mr.end()));

        // Step to the next live object within the MemRegion if needed.
        if (is_live(_current)) {
          // Non-objArrays starting before mr were already scanned completely
          // as part of the previous chunk of the region, so skip past them.
          if (_current < mr.start() && !oop(_current)->is_objArray()) {
            _current += oop(_current)->size();
            // We might have positioned _current on a non-live object. Reposition to the next
            // live one if needed.
            move_if_below_tams();
          }
        } else {
          // The object at _current can only be dead if it is below TAMS, so we
          // can use the bitmap immediately.
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
          assert(_current == _mr.end() || is_live(_current),
                 "Current " PTR_FORMAT " should be live (%s) or beyond the end of the MemRegion (" PTR_FORMAT ")",
                 p2i(_current), BOOL_TO_STR(is_live(_current)), p2i(_mr.end()));
        }
      }

      void move_to_next() {
        _current += next()->size();
        move_if_below_tams();
      }

      oop next() const {
        oop result = oop(_current);
        assert(is_live(_current),
               "Object " PTR_FORMAT " must be live TAMS " PTR_FORMAT " below %d mr " PTR_FORMAT " " PTR_FORMAT " outside %d",
               p2i(_current), p2i(_tams), _tams > _current, p2i(_mr.start()), p2i(_mr.end()), _mr.contains(result));
        return result;
      }

      bool has_next() const {
        return _current < _mr.end();
      }
    };

    // Rebuild remembered sets in the part of the region specified by mr and hr.
    // Objects between the bottom of the region and the TAMS are checked for liveness
    // using the given bitmap. Objects between TAMS and TARS are assumed to be live.
    // Returns the number of live bytes marked between bottom and TAMS.
    size_t rebuild_rem_set_in_region(const G1CMBitMap* const bitmap,
                                     HeapWord* const top_at_mark_start,
                                     HeapWord* const top_at_rebuild_start,
                                     HeapRegion* hr,
                                     MemRegion mr) {
      size_t marked_words = 0;

      if (hr->is_humongous()) {
        oop const humongous_obj = oop(hr->humongous_start_region()->bottom());
        if (is_humongous_live(humongous_obj, bitmap, top_at_mark_start, top_at_rebuild_start)) {
          // We need to scan both [bottom, TAMS) and [TAMS, top_at_rebuild_start);
          // however in case of humongous objects it is sufficient to scan the encompassing
          // area (top_at_rebuild_start is always greater than or equal to TAMS) as one of
          // the two areas will be zero sized. I.e. TAMS is either the same as bottom or
          // top(_at_rebuild_start); any other value would mean that TAMS points somewhere
          // into the object.
          assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
                 "More than one object in the humongous region?");
          humongous_obj->oop_iterate(&_update_cl, mr);
          return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion((HeapWord*)humongous_obj, humongous_obj->size())).byte_size() : 0;
        } else {
          return 0;
        }
      }

      for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
        oop obj = it.next();
        size_t scanned_size = scan_for_references(obj, mr);
        if ((HeapWord*)obj < top_at_mark_start) {
          marked_words += scanned_size;
        }
      }

      return marked_words * HeapWordSize;
    }
  public:
    G1RebuildRemSetHeapRegionClosure(G1CollectedHeap* g1h,
                                     G1ConcurrentMark* cm,
                                     uint worker_id) :
      HeapRegionClosure(),
      _cm(cm),
      _update_cl(g1h, worker_id) { }

    bool do_heap_region(HeapRegion* hr) {
      if (_cm->has_aborted()) {
        return true;
      }

      uint const region_idx = hr->hrm_index();
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start_check = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start_check == NULL ||
             top_at_rebuild_start_check > hr->bottom(),
             "A TARS (" PTR_FORMAT ") == bottom() (" PTR_FORMAT ") indicates the old region %u is empty (%s)",
             p2i(top_at_rebuild_start_check), p2i(hr->bottom()), region_idx, hr->get_type_str());

      size_t total_marked_bytes = 0;
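      // G1RebuildRemSetChunkSize is in bytes; convert it to the word-sized
      // chunks in which the region is claimed and scanned below.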
      size_t const chunk_size_in_words = G1RebuildRemSetChunkSize / HeapWordSize;

      HeapWord* const top_at_mark_start = hr->prev_top_at_mark_start();

      HeapWord* cur = hr->bottom();
      while (cur < hr->end()) {
        // After every iteration (yield point) we need to check whether the region's
        // TARS changed due to e.g. eager reclaim.
        HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);
        if (top_at_rebuild_start == NULL) {
          return false;
        }

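        // Restrict the chunk [cur, cur + chunk_size_in_words) to the part of
        // the region below TARS that still needs scanning.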
        MemRegion next_chunk = MemRegion(hr->bottom(), top_at_rebuild_start).intersection(MemRegion(cur, chunk_size_in_words));
        if (next_chunk.is_empty()) {
          break;
        }

        const Ticks start = Ticks::now();
        size_t marked_bytes = rebuild_rem_set_in_region(_cm->prev_mark_bitmap(),
                                                        top_at_mark_start,
                                                        top_at_rebuild_start,
                                                        hr,
                                                        next_chunk);
        Tickspan time = Ticks::now() - start;

        log_trace(gc, remset, tracking)("Rebuilt region %u "
                                        "live " SIZE_FORMAT " "
                                        "time %.3fms "
                                        "marked bytes " SIZE_FORMAT " "
                                        "bot " PTR_FORMAT " "
                                        "TAMS " PTR_FORMAT " "
                                        "TARS " PTR_FORMAT,
                                        region_idx,
                                        _cm->liveness(region_idx) * HeapWordSize,
                                        time.seconds() * 1000.0,
                                        marked_bytes,
                                        p2i(hr->bottom()),
                                        p2i(top_at_mark_start),
                                        p2i(top_at_rebuild_start));

        if (marked_bytes > 0) {
          total_marked_bytes += marked_bytes;
        }
        cur += chunk_size_in_words;

        _cm->do_yield_check();
        if (_cm->has_aborted()) {
          return true;
        }
      }
      // In the final iteration of the loop the region might have been eagerly reclaimed.
      // Simply filter out those regions. We cannot just use the region type because there
      // might have already been new allocations into these regions.
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start == NULL ||
             total_marked_bytes == hr->marked_bytes(),
             "Marked bytes " SIZE_FORMAT " for region %u (%s) in [bottom, TAMS) do not match calculated marked bytes " SIZE_FORMAT " "
             "(" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ")",
             total_marked_bytes, hr->hrm_index(), hr->get_type_str(), hr->marked_bytes(),
             p2i(hr->bottom()), p2i(top_at_mark_start), p2i(top_at_rebuild_start));
      // Abort state may have changed after the yield check.
      return _cm->has_aborted();
    }
  };

  HeapRegionClaimer _hr_claimer;
  G1ConcurrentMark* _cm;

  uint _worker_id_offset;
public:
  G1RebuildRemSetTask(G1ConcurrentMark* cm,
                      uint n_workers,
                      uint worker_id_offset) :
      AbstractGangTask("G1 Rebuild Remembered Set"),
      _hr_claimer(n_workers),
      _cm(cm),
      _worker_id_offset(worker_id_offset) {
  }

  void work(uint worker_id) {
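    // Join the suspendible thread set so that this worker can be paused at
    // safepoints via the yield checks in the closure above.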
    SuspendibleThreadSetJoiner sts_join;

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1RebuildRemSetHeapRegionClosure cl(g1h, _cm, _worker_id_offset + worker_id);
    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
  }
};

void G1RemSet::rebuild_rem_set(G1ConcurrentMark* cm,
                               WorkGang* workers,
                               uint worker_id_offset) {
  uint num_workers = workers->active_workers();

  G1RebuildRemSetTask cl(cm,
                         num_workers,
                         worker_id_offset);
  workers->run_task(&cl, num_workers);
}