/*
 * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1HOTCARDCACHE_HPP
#define SHARE_GC_G1_G1HOTCARDCACHE_HPP

#include "gc/g1/g1CardCounts.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"

class G1CardTableEntryClosure;
class G1CollectedHeap;
class HeapRegion;
// An evicting cache of cards that have been logged by the G1 post
// write barrier. Placing a card in the cache delays the refinement
// of the card until the card is evicted, or the cache is drained
// during the next evacuation pause.
//
// The first thing the G1 post write barrier does is to check whether
// the card containing the updated pointer is already dirty and, if
// so, skip the remaining code in the barrier.
//
// Delaying the refinement of a card keeps that card dirty, so
// subsequent writes to the same card are filtered out by that first
// dirty-card check, skipping the remainder of the write barrier.
//
// This can significantly reduce the overhead of the write barrier
// code, increasing throughput.
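//
// A sketch of the intended use by a caller that refines cards (illustrative
// only, not the actual call sites; refine_card() is a hypothetical helper):
//
//   CardValue* to_refine = hot_card_cache->insert(card_ptr);
//   if (to_refine != NULL) {
//     // Either the card was not hot, or a cached card was evicted to
//     // make room for it; refine the returned card immediately.
//     refine_card(to_refine);
//   }
//   // Cards left in the cache are refined later, when they are evicted
//   // or when the cache is drained during the next evacuation pause.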

class G1HotCardCache: public CHeapObj<mtGC> {
public:
  typedef CardTable::CardValue CardValue;

private:
  G1CollectedHeap*  _g1h;

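  // Whether the hot card cache is in use; see default_use_cache() and
  // set_use_cache().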
  bool              _use_cache;

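  // Per-card counts used by insert() to decide whether a card is 'hot'
  // enough to be cached.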
  G1CardCounts      _card_counts;

  // The card cache table
  CardValue** _hot_cache;

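  // Number of entries in the _hot_cache table, set up in initialize().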
  size_t            _hot_cache_size;

  size_t            _hot_cache_par_chunk_size;

  // Avoids false sharing when concurrently updating _hot_cache_idx or
  // _hot_cache_par_claimed_idx. These are never updated concurrently,
  // so there is no need to also pad between the two of them.
  char _pad_before[DEFAULT_CACHE_LINE_SIZE];

  volatile size_t _hot_cache_idx;

  volatile size_t _hot_cache_par_claimed_idx;

  char _pad_after[DEFAULT_CACHE_LINE_SIZE];

  // Records whether insertion overflowed the hot card cache at least once. This
  // avoids the need for a separate atomic counter of how many valid entries are
  // in the HCC.
  volatile bool _cache_wrapped_around;

  // The number of cached cards a thread claims when flushing the cache
  static const int ClaimChunkSize = 32;

 public:
  static bool default_use_cache() {
    return (G1ConcRSLogCacheSize > 0);
  }

  G1HotCardCache(G1CollectedHeap* g1h);
  ~G1HotCardCache();

  void initialize(G1RegionToSpaceMapper* card_counts_storage);

  bool use_cache() { return _use_cache; }

  void set_use_cache(bool b) {
    _use_cache = (b ? default_use_cache() : false);
  }

  // Returns the card to be refined or NULL.
  //
  // Increments the count for the given card. If the card is not 'hot',
  // it is returned for immediate refining. Otherwise the card is
  // added to the hot card cache.
  // If there is enough room in the hot card cache for the card we're
  // adding, NULL is returned and no further action is needed.
  // If we evict a card from the cache to make room for the new card,
  // the evicted card is then returned for refinement.
  CardValue* insert(CardValue* card_ptr);

  // Refine the cards whose refinement was delayed as a result of
  // being in the cache.
  void drain(G1CardTableEntryClosure* cl, uint worker_id);
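
  // An illustrative sketch of how worker threads can drain the cache in
  // parallel, each claiming chunks of ClaimChunkSize cards by atomically
  // advancing _hot_cache_par_claimed_idx (not the actual implementation;
  // assumes the closure exposes a do_card_ptr(card, worker_id) hook):
  //
  //   while (_hot_cache_par_claimed_idx < _hot_cache_size) {
  //     size_t end   = <atomically add ClaimChunkSize to _hot_cache_par_claimed_idx>;
  //     size_t start = end - ClaimChunkSize;
  //     for (size_t i = start; i < MIN2(end, _hot_cache_size); i++) {
  //       if (_hot_cache[i] != NULL) {
  //         cl->do_card_ptr(_hot_cache[i], worker_id);
  //       }
  //     }
  //   }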

  // Set up for parallel processing of the cards in the hot cache
  void reset_hot_cache_claimed_index() {
    _hot_cache_par_claimed_idx = 0;
  }

  // Resets the hot card cache and discards the entries.
  void reset_hot_cache() {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
    assert(Thread::current()->is_VM_thread(), "Current thread should be the VM thread");
    if (default_use_cache()) {
      reset_hot_cache_internal();
    }
  }

  // Zeros the values in the card counts table for the given region
  void reset_card_counts(HeapRegion* hr);

  // Number of entries in the HCC.
  size_t num_entries() const {
    return _cache_wrapped_around ? _hot_cache_size : _hot_cache_idx + 1;
  }
 private:
  void reset_hot_cache_internal() {
    assert(_hot_cache != NULL, "Logic");
    _hot_cache_idx = 0;
    for (size_t i = 0; i < _hot_cache_size; i++) {
      _hot_cache[i] = NULL;
    }
    _cache_wrapped_around = false;
  }
};

#endif // SHARE_GC_G1_G1HOTCARDCACHE_HPP