/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/intHisto.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/ticks.hpp"

// Collects information about the overall remembered set scan progress during an evacuation.
class G1RemSetScanState : public CHeapObj<mtGC> {
private:
  class G1ClearCardTableTask : public AbstractGangTask {
    G1CollectedHeap* _g1h;
    uint* _dirty_region_list;
    size_t _num_dirty_regions;
    size_t _chunk_length;

    size_t volatile _cur_dirty_regions;
  public:
    G1ClearCardTableTask(G1CollectedHeap* g1h,
                         uint* dirty_region_list,
                         size_t num_dirty_regions,
                         size_t chunk_length) :
      AbstractGangTask("G1 Clear Card Table Task"),
      _g1h(g1h),
      _dirty_region_list(dirty_region_list),
      _num_dirty_regions(num_dirty_regions),
      _chunk_length(chunk_length),
      _cur_dirty_regions(0) {

      assert(chunk_length > 0, "must be");
    }

    static size_t chunk_size() { return M; }

    void work(uint worker_id) {
      while (_cur_dirty_regions < _num_dirty_regions) {
        size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
        size_t max = MIN2(next + _chunk_length, _num_dirty_regions);

        for (size_t i = next; i < max; i++) {
          HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
          if (!r->is_survivor()) {
            r->clear_cardtable();
          }
        }
      }
    }
  };

  size_t _max_regions;

  // Scan progress for the remembered set of a single region. Transitions from
  // Unclaimed -> Claimed -> Complete.
  // At each of the transitions the thread that does the transition needs to perform
  // some special action once. This is the reason for the extra "Claimed" state.
  typedef jint G1RemsetIterState;

  static const G1RemsetIterState Unclaimed = 0; // The remembered set has not been scanned yet.
  static const G1RemsetIterState Claimed = 1;   // The remembered set is currently being scanned.
  static const G1RemsetIterState Complete = 2;  // The remembered set has been completely scanned.
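
  // For illustration, a sketch (comments only, not additional code) of how a
  // worker drives a region through these states using claim_iter() and
  // set_iter_complete() below:
  //
  //   if (claim_iter(region)) {          // Unclaimed -> Claimed: exactly one
  //     ...once-only setup work...       // thread wins and performs the setup.
  //   }
  //   ...all threads scan their share of the remembered set...
  //   if (set_iter_complete(region)) {   // Claimed -> Complete: exactly one
  //     ...once-only completion work...  // thread wins and performs the cleanup.
  //   }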

  G1RemsetIterState volatile* _iter_states;
  // The current location where the next thread should continue scanning in a region's
  // remembered set.
  size_t volatile* _iter_claims;

  // Temporary buffer holding the regions containing cards that were scanned or
  // claimed during remembered set scanning; their card tables are cleared at the
  // end of the collection. These regions are also called "dirty". Valid entries
  // are in [0.._cur_dirty_region).
  uint* _dirty_region_buffer;

  typedef jbyte IsDirtyRegionState;
  static const IsDirtyRegionState Clean = 0;
  static const IsDirtyRegionState Dirty = 1;
  // Holds a flag for every region whether it is in the _dirty_region_buffer already
  // to avoid duplicates. Uses jbyte since there are no atomic instructions for bools.
  IsDirtyRegionState* _in_dirty_region_buffer;
  size_t _cur_dirty_region;

  // Creates a snapshot of the current _top values at the start of collection to
  // filter out card marks that we do not want to scan.
  class G1ResetScanTopClosure : public HeapRegionClosure {
  private:
    HeapWord** _scan_top;
  public:
    G1ResetScanTopClosure(HeapWord** scan_top) : _scan_top(scan_top) { }

    virtual bool do_heap_region(HeapRegion* r) {
      uint hrm_index = r->hrm_index();
      if (!r->in_collection_set() && r->is_old_or_humongous_or_archive() && !r->is_empty()) {
        _scan_top[hrm_index] = r->top();
      } else {
        _scan_top[hrm_index] = NULL;
      }
      return false;
    }
  };

  // For each region, contains the maximum top() value to be used during this garbage
  // collection. Subsumes common checks like filtering out everything but old and
  // humongous regions outside the collection set.
  // This is valid because we are not interested in scanning stray remembered set
  // entries from free or archive regions.
  HeapWord** _scan_top;
public:
  G1RemSetScanState() :
    _max_regions(0),
    _iter_states(NULL),
    _iter_claims(NULL),
    _dirty_region_buffer(NULL),
    _in_dirty_region_buffer(NULL),
    _cur_dirty_region(0),
    _scan_top(NULL) {
  }

  ~G1RemSetScanState() {
    if (_iter_states != NULL) {
      FREE_C_HEAP_ARRAY(G1RemsetIterState, _iter_states);
    }
    if (_iter_claims != NULL) {
      FREE_C_HEAP_ARRAY(size_t, _iter_claims);
    }
    if (_dirty_region_buffer != NULL) {
      FREE_C_HEAP_ARRAY(uint, _dirty_region_buffer);
    }
    if (_in_dirty_region_buffer != NULL) {
      FREE_C_HEAP_ARRAY(IsDirtyRegionState, _in_dirty_region_buffer);
    }
    if (_scan_top != NULL) {
      FREE_C_HEAP_ARRAY(HeapWord*, _scan_top);
    }
  }

  void initialize(uint max_regions) {
    assert(_iter_states == NULL, "Must not be initialized twice");
    assert(_iter_claims == NULL, "Must not be initialized twice");
    _max_regions = max_regions;
    _iter_states = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
    _iter_claims = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
    _dirty_region_buffer = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
    _in_dirty_region_buffer = NEW_C_HEAP_ARRAY(IsDirtyRegionState, max_regions, mtGC);
    _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC);
  }

  void reset() {
    for (uint i = 0; i < _max_regions; i++) {
      _iter_states[i] = Unclaimed;
      _scan_top[i] = NULL;
    }

    G1ResetScanTopClosure cl(_scan_top);
    G1CollectedHeap::heap()->heap_region_iterate(&cl);

    memset((void*)_iter_claims, 0, _max_regions * sizeof(size_t));
    memset(_in_dirty_region_buffer, Clean, _max_regions * sizeof(IsDirtyRegionState));
    _cur_dirty_region = 0;
  }

  // Attempt to claim the remembered set of the region for iteration. Returns true
  // if this call caused the transition from Unclaimed to Claimed.
  inline bool claim_iter(uint region) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
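    // Racy read first: in the common case the region is already claimed, so the
    // plain load lets us return without the more expensive cmpxchg below. The same
    // check-then-CAS pattern is used in add_dirty_region().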
    if (_iter_states[region] != Unclaimed) {
      return false;
    }
    G1RemsetIterState res = Atomic::cmpxchg(Claimed, &_iter_states[region], Unclaimed);
    return (res == Unclaimed);
  }

  // Tries to atomically set the iteration state to "complete". Returns true for the
  // thread that caused the transition.
  inline bool set_iter_complete(uint region) {
    if (iter_is_complete(region)) {
      return false;
    }
    G1RemsetIterState res = Atomic::cmpxchg(Complete, &_iter_states[region], Claimed);
    return (res == Claimed);
  }

  // Returns true if the region's iteration is complete.
  inline bool iter_is_complete(uint region) const {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _iter_states[region] == Complete;
  }

  // The current position within the remembered set of the given region.
  inline size_t iter_claimed(uint region) const {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _iter_claims[region];
  }

  // Claims the next block of "step" cards within the remembered set of the given
  // region and returns its starting position.
  inline size_t iter_claimed_next(uint region, size_t step) {
    return Atomic::add(step, &_iter_claims[region]) - step;
  }
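
  // For illustration: with step == 64, concurrent callers obtain the disjoint
  // starting positions 0, 64, 128, ... in some interleaving, so each caller owns
  // a distinct block of 64 cards and no card is handed out twice.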

  void add_dirty_region(uint region) {
    if (_in_dirty_region_buffer[region] == Dirty) {
      return;
    }

    bool marked_as_dirty = Atomic::cmpxchg(Dirty, &_in_dirty_region_buffer[region], Clean) == Clean;
    if (marked_as_dirty) {
      size_t allocated = Atomic::add(1u, &_cur_dirty_region) - 1;
      _dirty_region_buffer[allocated] = region;
    }
  }

  HeapWord* scan_top(uint region_idx) const {
    return _scan_top[region_idx];
  }

  // Clear the card table of "dirty" regions.
  void clear_card_table(WorkGang* workers) {
    if (_cur_dirty_region == 0) {
      return;
    }

    size_t const num_chunks = align_up(_cur_dirty_region * HeapRegion::CardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size();
    uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
    size_t const chunk_length = G1ClearCardTableTask::chunk_size() / HeapRegion::CardsPerRegion;
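
    // Worked example (assuming 1 MB regions, i.e. 2048 512-byte cards per region,
    // so one chunk of M cards spans 512 regions): clearing 1024 dirty regions
    // yields num_chunks = 2 and chunk_length = 512, capping num_workers at 2.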

    // Iterate over the dirty cards region list.
    G1ClearCardTableTask cl(G1CollectedHeap::heap(), _dirty_region_buffer, _cur_dirty_region, chunk_length);

    log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " "
                        "units of work for " SIZE_FORMAT " regions.",
                        cl.name(), num_workers, num_chunks, _cur_dirty_region);
    workers->run_task(&cl, num_workers);

#ifndef PRODUCT
    G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
#endif
  }
};

G1RemSet::G1RemSet(G1CollectedHeap* g1h,
                   G1CardTable* ct,
                   G1HotCardCache* hot_card_cache) :
  _scan_state(new G1RemSetScanState()),
  _prev_period_summary(),
  _g1h(g1h),
  _num_conc_refined_cards(0),
  _ct(ct),
  _g1p(_g1h->g1_policy()),
  _hot_card_cache(hot_card_cache) {
}

G1RemSet::~G1RemSet() {
  if (_scan_state != NULL) {
    delete _scan_state;
  }
}

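// Approximates the maximum number of threads that may add cards to remembered
// sets in parallel: mutator threads helping to process dirty card queues,
// concurrent refinement threads, and GC worker threads (whichever of the
// concurrent and parallel worker counts is larger). The result is used below to
// size the from-card cache.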
uint G1RemSet::num_par_rem_sets() {
  return DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
}

void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
}

G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
                                                   G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
                                                   G1ParScanThreadState* pss,
                                                   uint worker_i) :
  _g1h(G1CollectedHeap::heap()),
  _ct(_g1h->card_table()),
  _pss(pss),
  _scan_objs_on_card_cl(scan_obj_on_card),
  _scan_state(scan_state),
  _worker_i(worker_i),
  _cards_scanned(0),
  _cards_claimed(0),
  _cards_skipped(0),
  _rem_set_root_scan_time(),
  _rem_set_trim_partially_time(),
  _strong_code_root_scan_time(),
  _strong_code_trim_partially_time() {
}

void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card) {
  _ct->set_card_claimed(card_index);
  _scan_state->add_dirty_region(region_idx_for_card);
}

void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
  HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
  assert(!card_region->is_young(), "Should not scan card in young region %u", region_idx_for_card);
  card_region->oops_on_card_seq_iterate_careful<true>(mr, _scan_objs_on_card_cl);
  _scan_objs_on_card_cl->trim_queue_partially();
  _cards_scanned++;
}

void G1ScanRSForRegionClosure::scan_rem_set_roots(HeapRegion* r) {
  EventGCPhaseParallel event;
  uint const region_idx = r->hrm_index();

  if (_scan_state->claim_iter(region_idx)) {
    // If we ever free the collection set concurrently, we should also clear the
    // card table concurrently; in that case we would not need to add regions of
    // the collection set to the dirty region list here.
    _scan_state->add_dirty_region(region_idx);
  }

  if (r->rem_set()->cardset_is_empty()) {
    return;
  }

  // We claim cards in blocks so as to reduce the contention.
  size_t const block_size = G1RSetScanBlockSize;
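
  // For illustration (assuming the default G1RSetScanBlockSize of 64): the first
  // claim hands out cards [0, 64) of this remembered set, the next one [64, 128),
  // and so on. Each thread walks the whole iterator but only scans cards inside
  // the blocks it claimed itself; all other cards are counted as skipped.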

  HeapRegionRemSetIterator iter(r->rem_set());
  size_t card_index;

  size_t claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
  for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
    if (current_card >= claimed_card_block + block_size) {
      claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
    }
    if (current_card < claimed_card_block) {
      _cards_skipped++;
      continue;
    }
    _cards_claimed++;

    HeapWord* const card_start = _g1h->bot()->address_for_index_raw(card_index);
    uint const region_idx_for_card = _g1h->addr_to_region(card_start);

#ifdef ASSERT
    HeapRegion* hr = _g1h->region_at_or_null(region_idx_for_card);
    assert(hr == NULL || hr->is_in_reserved(card_start),
           "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index());
#endif
    HeapWord* const top = _scan_state->scan_top(region_idx_for_card);
    if (card_start >= top) {
      continue;
    }

    // If the card is dirty, then G1 will scan it during Update RS.
    if (_ct->is_card_claimed(card_index) || _ct->is_card_dirty(card_index)) {
      continue;
    }

    // We claim lazily (so races are possible but they're benign), which reduces the
    // number of duplicate scans (the rsets of the regions in the cset can intersect).
    // Claim the card after checking bounds above: the remembered set may contain
    // random cards into current survivor, and we would then have an incorrectly
    // claimed card in survivor space. Card table clear does not reset the card table
    // of survivor space regions.
    claim_card(card_index, region_idx_for_card);

    MemRegion const mr(card_start, MIN2(card_start + BOTConstants::N_words, top));

    scan_card(mr, region_idx_for_card);
  }
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ScanRS));
}

void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
  EventGCPhaseParallel event;
  r->strong_code_roots_do(_pss->closures()->weak_codeblobs());
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::CodeRoots));
}

bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
  assert(r->in_collection_set(),
         "Should only be called on elements of the collection set but region %u is not.",
         r->hrm_index());
  uint const region_idx = r->hrm_index();

  // Do an early out if we know we are complete.
  if (_scan_state->iter_is_complete(region_idx)) {
    return false;
  }

  {
    G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time);
    scan_rem_set_roots(r);
  }

  if (_scan_state->set_iter_complete(region_idx)) {
    G1EvacPhaseWithTrimTimeTracker timer(_pss, _strong_code_root_scan_time, _strong_code_trim_partially_time);
    // Scan the strong code root list attached to the current region
    scan_strong_code_roots(r);
  }
  return false;
}

void G1RemSet::scan_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
  G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, worker_i);
  _g1h->collection_set_iterate_from(&cl, worker_i);

  G1GCPhaseTimes* p = _g1p->phase_times();

  p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, cl.rem_set_root_scan_time().seconds());
  p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.rem_set_trim_partially_time().seconds());

  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);

  p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time().seconds());
  p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.strong_code_root_trim_partially_time().seconds());
}

// Closure used for updating rem sets. Only called during an evacuation pause.
class G1RefineCardClosure: public CardTableEntryClosure {
  G1RemSet* _g1rs;
  G1ScanObjsDuringUpdateRSClosure* _update_rs_cl;

  size_t _cards_scanned;
  size_t _cards_skipped;
public:
  G1RefineCardClosure(G1CollectedHeap* g1h, G1ScanObjsDuringUpdateRSClosure* update_rs_cl) :
    _g1rs(g1h->g1_rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0)
  {}

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    // The only time we care about recording cards that
    // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
    // In this case worker_i should be the id of a GC worker thread.
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");

    bool card_scanned = _g1rs->refine_card_during_gc(card_ptr, _update_rs_cl);

    if (card_scanned) {
      _update_rs_cl->trim_queue_partially();
      _cards_scanned++;
    } else {
      _cards_skipped++;
    }
    return true;
  }

  size_t cards_scanned() const { return _cards_scanned; }
  size_t cards_skipped() const { return _cards_skipped; }
};

void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  G1GCPhaseTimes* p = _g1p->phase_times();

  // Apply closure to log entries in the HCC.
  if (G1HotCardCache::default_use_cache()) {
    G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::ScanHCC, worker_i);

    G1ScanObjsDuringUpdateRSClosure scan_hcc_cl(_g1h, pss);
    G1RefineCardClosure refine_card_cl(_g1h, &scan_hcc_cl);
    _g1h->iterate_hcc_closure(&refine_card_cl, worker_i);
  }

  // Now apply the closure to all remaining log entries.
  {
    G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::UpdateRS, worker_i);

    G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1h, pss);
    G1RefineCardClosure refine_card_cl(_g1h, &update_rs_cl);
    _g1h->iterate_dirty_card_closure(&refine_card_cl, worker_i);

    p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_scanned(), G1GCPhaseTimes::UpdateRSScannedCards);
    p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_skipped(), G1GCPhaseTimes::UpdateRSSkippedCards);
  }
}

void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i) {
  update_rem_set(pss, worker_i);
  scan_rem_set(pss, worker_i);
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
  dcqs.concatenate_logs();

  _scan_state->reset();
}

void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();

  // Set all cards back to clean.
  double start = os::elapsedTime();
  _scan_state->clear_card_table(_g1h->workers());
  phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
}

inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
#ifdef ASSERT
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
         "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
         p2i(card_ptr),
         ct->index_for(ct->addr_for(card_ptr)),
         p2i(ct->addr_for(card_ptr)),
         g1h->addr_to_region(ct->addr_for(card_ptr)));
#endif
}

void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
                                        uint worker_i) {
  assert(!_g1h->is_gc_active(), "Only call concurrently");

  // Construct the region representing the card.
  HeapWord* start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1h->heap_region_containing_or_null(start);

  // If this is a (stale) card into an uncommitted region, exit.
  if (r == NULL) {
    return;
  }

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return;
  }

  // This check is needed for some uncommon cases where we should
  // ignore the card.
  //
  // The region could be young.  Cards for young regions are
  // distinctly marked (set to g1_young_gen), so the post-barrier will
  // filter them out.  However, that marking is performed
  // concurrently.  A write to a young object could occur before the
  // card has been marked young, slipping past the filter.
  //
  // The card could be stale, because the region has been freed since
  // the card was recorded. In this case the region type could be
  // anything.  If (still) free or (reallocated) young, just ignore
  // it.  If (reallocated) old or humongous, the later card trimming
  // and additional checks in iteration may detect staleness.  At
  // worst, we end up processing a stale card unnecessarily.
  //
  // In the normal (non-stale) case, the synchronization between the
  // enqueueing of the card and processing it here will have ensured
  // we see the up-to-date region type here.
  if (!r->is_old_or_humongous_or_archive()) {
    return;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.

  if (_hot_card_cache->use_cache()) {
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    const jbyte* orig_card_ptr = card_ptr;
    card_ptr = _hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return;
    } else if (card_ptr != orig_card_ptr) {
      // Original card was inserted and an old card was evicted.
      start = _ct->addr_for(card_ptr);
      r = _g1h->heap_region_containing(start);

      // Check whether the region formerly in the cache should be
      // ignored, as discussed earlier for the original card.  The
      // region could have been freed while in the cache.
      if (!r->is_old_or_humongous_or_archive()) {
        return;
      }
    } // Else we still have the original card.
  }

  // Trim the region designated by the card to what's been allocated
  // in the region.  The card could be stale, or the card could cover
  // (part of) an object at the end of the allocated space and extend
  // beyond the end of allocation.

  // Non-humongous objects are only allocated in the old-gen during
  // GC, so if region is old then top is stable.  Humongous object
  // allocation sets top last; if top has not yet been set, this is
  // a stale card and we'll end up with an empty intersection.  If
  // this is not a stale card, the synchronization between the
  // enqueuing of the card and processing it here will have ensured
  // we see the up-to-date top here.
  HeapWord* scan_limit = r->top();

  if (scan_limit <= start) {
    // If the trimmed region is empty, the card must be stale.
    return;
  }

  // Okay to clean and process the card now.  There are still some
  // stale card cases that may be detected by iteration and dealt with
  // as iteration failure.
  *const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();

  // This fence serves two purposes.  First, the card must be cleaned
  // before processing the contents.  Second, we can't proceed with
  // processing until after the read of top, for synchronization with
  // possibly concurrent humongous object allocation.  It's okay that
  // reading top and reading type were racy with respect to each other.
  // We need both set, in any order, to proceed.
  OrderAccess::fence();

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* end = start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(start, MIN2(scan_limit, end));
  assert(!dirty_region.is_empty(), "sanity");
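
  // For illustration (assuming 512-byte cards, i.e. 64 heap words on 64-bit):
  // with start == 0x1000 and scan_limit == 0x1100 the card nominally covers
  // [0x1000, 0x1200), but dirty_region is trimmed to [0x1000, 0x1100) so we
  // never scan beyond the allocated part of the region.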

  G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_i);

  bool card_processed =
    r->oops_on_card_seq_iterate_careful<false>(dirty_region, &conc_refine_cl);

  // If unable to process the card then we encountered an unparsable
  // part of the heap (e.g. a partially allocated object) while
  // processing a stale card.  Despite the card being stale, redirty
  // and re-enqueue, because we've already cleaned the card.  Without
  // this we could incorrectly discard a non-stale card.
  if (!card_processed) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked.  (In fact, it's pretty likely.)
    if (*card_ptr != G1CardTable::dirty_card_val()) {
      *card_ptr = G1CardTable::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        G1BarrierSet::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
  }
}

bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
                                     G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
  assert(_g1h->is_gc_active(), "Only call during GC");

  // Construct the region representing the card.
  HeapWord* card_start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  uint const card_region_idx = _g1h->addr_to_region(card_start);

  HeapWord* scan_limit = _scan_state->scan_top(card_region_idx);
  if (scan_limit == NULL) {
    // This is a card into an uncommitted region. We need to bail out early as we
    // should not access the corresponding card table entry.
    return false;
  }

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do. This covers cards that were already
  // scanned as parts of the remembered sets.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return false;
  }

  // We claim lazily (so races are possible but they're benign), which reduces the
  // number of potential duplicate scans (multiple threads may enqueue the same card twice).
  *card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();

  _scan_state->add_dirty_region(card_region_idx);
  if (scan_limit <= card_start) {
    // If the card starts above the area in the region containing objects to scan, skip it.
    return false;
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
  assert(!dirty_region.is_empty(), "sanity");

  HeapRegion* const card_region = _g1h->region_at(card_region_idx);
  assert(!card_region->is_young(), "Should not scan card in young region %u", card_region_idx);
  bool card_processed = card_region->oops_on_card_seq_iterate_careful<true>(dirty_region, update_rs_cl);
  assert(card_processed, "must be");
  return true;
}

void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
  if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
      (period_count % G1SummarizeRSetStatsPeriod == 0)) {

    G1RemSetSummary current(this);
    _prev_period_summary.subtract_from(&current);

    Log(gc, remset) log;
    log.trace("%s", header);
    ResourceMark rm;
    LogStream ls(log.trace());
    _prev_period_summary.print_on(&ls);

    _prev_period_summary.set(&current);
  }
}

void G1RemSet::print_summary_info() {
  Log(gc, remset, exit) log;
  if (log.is_trace()) {
    log.trace(" Cumulative RS summary");
    G1RemSetSummary current(this);
    ResourceMark rm;
    LogStream ls(log.trace());
    current.print_on(&ls);
  }
}

class G1RebuildRemSetTask: public AbstractGangTask {
  // Aggregate the counting data that was constructed concurrently
  // with marking.
  class G1RebuildRemSetHeapRegionClosure : public HeapRegionClosure {
    G1ConcurrentMark* _cm;
    G1RebuildRemSetClosure _update_cl;

    // Applies _update_cl to the references of the given object, limiting objArrays
    // to the given MemRegion. Returns the number of words actually scanned.
    size_t scan_for_references(oop const obj, MemRegion mr) {
      size_t const obj_size = obj->size();
      // All non-objArrays and objArrays completely within the mr
      // can be scanned without passing the mr.
      if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) {
        obj->oop_iterate(&_update_cl);
        return obj_size;
      }
      // This path is for objArrays crossing the given MemRegion. Only scan the
      // area within the MemRegion.
      obj->oop_iterate(&_update_cl, mr);
      return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size();
    }
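
    // For illustration: an objArray straddling two chunks is visited once per
    // chunk, but each visit passes that chunk's MemRegion, so only references
    // located inside the chunk are followed; across both visits every element
    // is scanned exactly once.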

    // A humongous object is live (with respect to the scanning) if either
    // a) it is marked on the bitmap as such, or
    // b) its TARS is larger than TAMS, i.e. it has been allocated during marking.
    bool is_humongous_live(oop const humongous_obj, const G1CMBitMap* const bitmap, HeapWord* tams, HeapWord* tars) const {
      return bitmap->is_marked(humongous_obj) || (tars > tams);
    }

    // Iterator over the live objects within the given MemRegion.
    class LiveObjIterator : public StackObj {
      const G1CMBitMap* const _bitmap;
      const HeapWord* _tams;
      const MemRegion _mr;
      HeapWord* _current;

      bool is_below_tams() const {
        return _current < _tams;
      }

      bool is_live(HeapWord* obj) const {
        return !is_below_tams() || _bitmap->is_marked(obj);
      }

      HeapWord* bitmap_limit() const {
        return MIN2(const_cast<HeapWord*>(_tams), _mr.end());
      }

      void move_if_below_tams() {
        if (is_below_tams() && has_next()) {
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
        }
      }
    public:
      LiveObjIterator(const G1CMBitMap* const bitmap, const HeapWord* tams, const MemRegion mr, HeapWord* first_oop_into_mr) :
          _bitmap(bitmap),
          _tams(tams),
          _mr(mr),
          _current(first_oop_into_mr) {

        assert(_current <= _mr.start(),
               "First oop " PTR_FORMAT " should extend into mr [" PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(first_oop_into_mr), p2i(mr.start()), p2i(mr.end()));

        // Step to the next live object within the MemRegion if needed.
        if (is_live(_current)) {
          // Non-objArrays were scanned by the previous part of that region.
          if (_current < mr.start() && !oop(_current)->is_objArray()) {
            _current += oop(_current)->size();
            // We might have positioned _current on a non-live object. Reposition to the next
            // live one if needed.
            move_if_below_tams();
          }
        } else {
          // The object at _current can only be dead if below TAMS, so we can use
          // the bitmap immediately.
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
          assert(_current == _mr.end() || is_live(_current),
                 "Current " PTR_FORMAT " should be live (%s) or beyond the end of the MemRegion (" PTR_FORMAT ")",
                 p2i(_current), BOOL_TO_STR(is_live(_current)), p2i(_mr.end()));
        }
      }

      void move_to_next() {
        _current += next()->size();
        move_if_below_tams();
      }

      oop next() const {
        oop result = oop(_current);
        assert(is_live(_current),
               "Object " PTR_FORMAT " must be live TAMS " PTR_FORMAT " below %d mr " PTR_FORMAT " " PTR_FORMAT " outside %d",
               p2i(_current), p2i(_tams), _tams > _current, p2i(_mr.start()), p2i(_mr.end()), _mr.contains(result));
        return result;
      }

      bool has_next() const {
        return _current < _mr.end();
      }
    };

    // Rebuild remembered sets in the part of the region specified by mr and hr.
    // Objects between the bottom of the region and the TAMS are checked for liveness
    // using the given bitmap. Objects between TAMS and TARS are assumed to be live.
    // Returns the number of live bytes found below TAMS in the given area.
    size_t rebuild_rem_set_in_region(const G1CMBitMap* const bitmap,
                                     HeapWord* const top_at_mark_start,
                                     HeapWord* const top_at_rebuild_start,
                                     HeapRegion* hr,
                                     MemRegion mr) {
      size_t marked_words = 0;

      if (hr->is_humongous()) {
        oop const humongous_obj = oop(hr->humongous_start_region()->bottom());
        if (is_humongous_live(humongous_obj, bitmap, top_at_mark_start, top_at_rebuild_start)) {
          // We need to scan both [bottom, TAMS) and [TAMS, top_at_rebuild_start);
          // however in case of humongous objects it is sufficient to scan the encompassing
          // area (top_at_rebuild_start is always greater than or equal to TAMS) as one of
          // the two areas will be zero sized. I.e. TAMS is either the same as bottom or
          // the same as top_at_rebuild_start. There is no way TAMS can have a different
          // value: that would mean that TAMS points somewhere into the object.
          assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
                 "More than one object in the humongous region?");
          humongous_obj->oop_iterate(&_update_cl, mr);
          return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion((HeapWord*)humongous_obj, humongous_obj->size())).byte_size() : 0;
        } else {
          return 0;
        }
      }

      for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
        oop obj = it.next();
        size_t scanned_size = scan_for_references(obj, mr);
        if ((HeapWord*)obj < top_at_mark_start) {
          marked_words += scanned_size;
        }
      }

      return marked_words * HeapWordSize;
    }
  public:
    G1RebuildRemSetHeapRegionClosure(G1CollectedHeap* g1h,
                                     G1ConcurrentMark* cm,
                                     uint worker_id) :
      HeapRegionClosure(),
      _cm(cm),
      _update_cl(g1h, worker_id) { }

    bool do_heap_region(HeapRegion* hr) {
      if (_cm->has_aborted()) {
        return true;
      }

      uint const region_idx = hr->hrm_index();
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start_check = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start_check == NULL ||
             top_at_rebuild_start_check > hr->bottom(),
             "A TARS (" PTR_FORMAT ") == bottom() (" PTR_FORMAT ") indicates the old region %u is empty (%s)",
             p2i(top_at_rebuild_start_check), p2i(hr->bottom()), region_idx, hr->get_type_str());

      size_t total_marked_bytes = 0;
      size_t const chunk_size_in_words = G1RebuildRemSetChunkSize / HeapWordSize;
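      // For illustration (assuming the default G1RebuildRemSetChunkSize of 256K
      // and 8-byte heap words): chunk_size_in_words is 32768, so a worker reaches
      // the yield check below after scanning at most 256K of heap per chunk.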

      HeapWord* const top_at_mark_start = hr->prev_top_at_mark_start();

      HeapWord* cur = hr->bottom();
      while (cur < hr->end()) {
        // After every iteration (yield point) we need to check whether the region's
        // TARS changed due to e.g. eager reclaim.
        HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);
        if (top_at_rebuild_start == NULL) {
          return false;
        }

        MemRegion next_chunk = MemRegion(hr->bottom(), top_at_rebuild_start).intersection(MemRegion(cur, chunk_size_in_words));
        if (next_chunk.is_empty()) {
          break;
        }

        const Ticks start = Ticks::now();
        size_t marked_bytes = rebuild_rem_set_in_region(_cm->prev_mark_bitmap(),
                                                        top_at_mark_start,
                                                        top_at_rebuild_start,
                                                        hr,
                                                        next_chunk);
        Tickspan time = Ticks::now() - start;

        log_trace(gc, remset, tracking)("Rebuilt region %u "
                                        "live " SIZE_FORMAT " "
                                        "time %.3fms "
                                        "marked bytes " SIZE_FORMAT " "
                                        "bot " PTR_FORMAT " "
                                        "TAMS " PTR_FORMAT " "
                                        "TARS " PTR_FORMAT,
                                        region_idx,
                                        _cm->liveness(region_idx) * HeapWordSize,
                                        time.seconds() * 1000.0,
                                        marked_bytes,
                                        p2i(hr->bottom()),
                                        p2i(top_at_mark_start),
                                        p2i(top_at_rebuild_start));

        if (marked_bytes > 0) {
          total_marked_bytes += marked_bytes;
        }
        cur += chunk_size_in_words;

        _cm->do_yield_check();
        if (_cm->has_aborted()) {
          return true;
        }
      }
      // In the final iteration of the loop the region might have been eagerly
      // reclaimed. Simply filter out those regions. We cannot just use the region
      // type because there might already have been new allocations into these
      // regions.
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start == NULL ||
             total_marked_bytes == hr->marked_bytes(),
             "Marked bytes " SIZE_FORMAT " for region %u (%s) in [bottom, TAMS) do not match calculated marked bytes " SIZE_FORMAT " "
             "(" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ")",
             total_marked_bytes, hr->hrm_index(), hr->get_type_str(), hr->marked_bytes(),
             p2i(hr->bottom()), p2i(top_at_mark_start), p2i(top_at_rebuild_start));
      // Abort state may have changed after the yield check.
      return _cm->has_aborted();
    }
  };

  HeapRegionClaimer _hr_claimer;
  G1ConcurrentMark* _cm;

  uint _worker_id_offset;
public:
  G1RebuildRemSetTask(G1ConcurrentMark* cm,
                      uint n_workers,
                      uint worker_id_offset) :
      AbstractGangTask("G1 Rebuild Remembered Set"),
      _hr_claimer(n_workers),
      _cm(cm),
      _worker_id_offset(worker_id_offset) {
  }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join;

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1RebuildRemSetHeapRegionClosure cl(g1h, _cm, _worker_id_offset + worker_id);
    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
  }
};

void G1RemSet::rebuild_rem_set(G1ConcurrentMark* cm,
                               WorkGang* workers,
                               uint worker_id_offset) {
  uint num_workers = workers->active_workers();

  G1RebuildRemSetTask cl(cm,
                         num_workers,
                         worker_id_offset);
  workers->run_task(&cl, num_workers);
}