/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP

#include "memory/allocation.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"

// Forward decls
class ConcurrentG1RefineThread;
class DirtyCardQueue;
class G1CollectedHeap;
class G1RemSet;

class ConcurrentG1Refine: public CHeapObj {
  ConcurrentG1RefineThread** _threads;
  int _n_threads;
  int _n_worker_threads;
 /*
  * The length of the update buffer queue falls into one of three zones:
  * green, yellow, or red. If the length is in [0, green), nothing is
  * done: the buffers are left unprocessed to enable the caching effect of
  * the dirtied cards. In the yellow zone [green, yellow) the concurrent
  * refinement threads are gradually activated. In [yellow, red) all threads
  * are running. If the length reaches red (the maximum queue length) the
  * mutators start processing the buffers themselves.
  *
  * Two cases are worth noting (when G1UseAdaptiveConcRefinement
  * is turned off):
  * 1) green = yellow = red = 0. The mutator processes all buffers,
  *    except for those created by the deferred update machinery during
  *    a collection.
  * 2) green = 0. This disables caching, which can be a good way to
  *    minimize the time spent updating remembered sets during a collection.
  */
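 /*
  * Illustrative example (hypothetical values, not defaults): with
  * green = 6, yellow = 12 and red = 24, a queue length below 6 leaves
  * the buffers unprocessed, a length in [6, 12) activates the worker
  * threads one by one, a length in [12, 24) keeps all workers running,
  * and a length of 24 or more makes the mutator threads process the
  * buffers themselves.
  */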
  int _green_zone;
  int _yellow_zone;
  int _red_zone;

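  // The step between the activation thresholds of successive worker
  // threads, derived from the current zone boundaries (see
  // reset_threshold_step() below).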
  int _thread_threshold_step;

  // Reset the threshold step value based on the current zone boundaries.
  void reset_threshold_step();

  // The cache for card refinement.
  bool   _use_cache;
  bool   _def_use_cache;

  size_t _n_periods;    // Used as the clearing epoch

  // An evicting cache of the number of times each card
  // is accessed. Reduces, but does not eliminate, the amount
  // of duplicated processing of dirty cards.

  enum SomePrivateConstants {
    epoch_bits           = 32,
    card_num_shift       = epoch_bits,
    epoch_mask           = AllBits,
    card_num_mask        = AllBits,

    // The initial cache size is approximately 1/InitialCacheFraction
    // of the maximal cache size (i.e. the size needed for all cards
    // in the heap).
    InitialCacheFraction = 512
  };

  const static julong card_num_mask_in_place =
                        (julong) card_num_mask << card_num_shift;

  typedef struct {
    julong _value;      // |  card_num   |  epoch   |
  } CardEpochCacheEntry;

  julong make_epoch_entry(unsigned int card_num, unsigned int epoch) {
    assert(0 <= card_num && card_num < _max_n_card_counts, "Bounds");
    assert(0 <= epoch && epoch <= _n_periods, "must be");

    return ((julong) card_num << card_num_shift) | epoch;
  }
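
  // For example, card_num == 0x1234 and epoch == 7 are packed into the
  // single value 0x0000123400000007; extract_epoch() and
  // extract_card_num() below recover the two fields.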

  unsigned int extract_epoch(julong v) {
    return (v & epoch_mask);
  }

  unsigned int extract_card_num(julong v) {
    return (v & card_num_mask_in_place) >> card_num_shift;
  }

  typedef struct {
    unsigned char _count;
    unsigned char _evict_count;
  } CardCountCacheEntry;

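  // These two parallel arrays implement the card count cache: entries at
  // the same index (a hash of the card number, see hash() below) refer
  // to the same cached card, the epoch recording the clearing period
  // (_n_periods) in which the count was last updated.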
  CardCountCacheEntry* _card_counts;
  CardEpochCacheEntry* _card_epochs;

  // The current number of buckets in the card count cache
  unsigned _n_card_counts;

  // The maximum number of buckets required to cover the cards
  // of the entire reserved heap
  unsigned _max_n_card_counts;

  // Possible sizes of the cache: odd primes that roughly double in size.
  // (See jvmtiTagMap.cpp).
  static int _cc_cache_sizes[];

  // The index in _cc_cache_sizes corresponding to the size of
  // _card_counts.
  int _cache_size_index;

  bool _expand_card_counts;

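  // Base of the card table; card numbers are offsets from this pointer
  // (see ptr_2_card_num() below).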
  const jbyte* _ct_bot;

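  // The "hot" card cache: cards observed to be dirtied repeatedly are
  // held here rather than refined immediately; entries evicted from the
  // cache are handed back to the caller for processing (see
  // cache_insert() below).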
  jbyte**      _hot_cache;
  int          _hot_cache_size;
  int          _n_hot;
  int          _hot_cache_idx;

  int          _hot_cache_par_chunk_size;
  volatile int _hot_cache_par_claimed_idx;

  // Needed to work around 6817995
  CardTableModRefBS* _ct_bs;
  G1CollectedHeap*   _g1h;

  // Expands the array that holds the card counts to the next size up.
  void expand_card_count_cache();

  // Hash a given key (the index of card_ptr) with the specified size.
  static unsigned int hash(size_t key, int size) {
    return (unsigned int) key % size;
  }

  // Hash a given key (the index of card_ptr) with the current cache size.
  unsigned int hash(size_t key) {
    return hash(key, _n_card_counts);
  }

  unsigned ptr_2_card_num(jbyte* card_ptr) {
    return (unsigned) (card_ptr - _ct_bot);
  }

  jbyte* card_num_2_ptr(unsigned card_num) {
    return (jbyte*) (_ct_bot + card_num);
  }

  // Increments the count for the given card; the count after
  // incrementing is returned via *count.
  jbyte* add_card_count(jbyte* card_ptr, int* count, bool* defer);

  // Returns true if this card is in a young region.
  bool is_young_card(jbyte* card_ptr);

 public:
  ConcurrentG1Refine();
  ~ConcurrentG1Refine();

  void init(); // Accomplish some initialization that has to wait.
  void stop();

  void reinitialize_threads();

  // Iterate over the concurrent refinement threads.
  void threads_do(ThreadClosure *tc);

  // If this is the first entry for the slot, writes into the cache and
  // returns NULL. If it causes an eviction, returns the evicted pointer.
  // Otherwise, it is a cache hit and returns NULL.
  jbyte* cache_insert(jbyte* card_ptr, bool* defer);
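  // A non-NULL result is an evicted card that still needs to be
  // processed by the caller (see the callers in G1RemSet).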

  // Process the cached entries.
  void clean_up_cache(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);

  // Set up for parallel processing of the cards in the hot cache
  void clear_hot_cache_claimed_index() {
    _hot_cache_par_claimed_idx = 0;
  }

  // Discard entries in the hot cache.
  void clear_hot_cache() {
    _hot_cache_idx = 0; _n_hot = 0;
  }

  bool hot_cache_is_empty() { return _n_hot == 0; }

  bool use_cache() { return _use_cache; }
  void set_use_cache(bool b) {
    if (b) _use_cache = _def_use_cache;
    else   _use_cache = false;
  }

  void clear_and_record_card_counts();

  static int thread_num();

  void print_worker_threads_on(outputStream* st) const;

  void set_green_zone(int x)  { _green_zone = x;  }
  void set_yellow_zone(int x) { _yellow_zone = x; }
  void set_red_zone(int x)    { _red_zone = x;    }

  int green_zone() const      { return _green_zone;  }
  int yellow_zone() const     { return _yellow_zone; }
  int red_zone() const        { return _red_zone;    }

  int total_thread_num() const  { return _n_threads;        }
  int worker_thread_num() const { return _n_worker_threads; }

  int thread_threshold_step() const { return _thread_threshold_step; }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP