/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Forward decls
class ConcurrentG1RefineThread;
class G1RemSet;

class ConcurrentG1Refine: public CHeapObj {
  ConcurrentG1RefineThread** _threads;
  int _n_threads;
  int _n_worker_threads;
  /*
   * The update buffer queue length falls into one of three zones:
   * green, yellow or red. If the length is in [0, green) nothing is
   * done: the buffers are left unprocessed to enable the caching effect
   * of the dirtied cards. In the yellow zone [green, yellow) the
   * concurrent refinement threads are gradually activated. In
   * [yellow, red) all threads are running. If the length reaches red
   * (the maximum queue length) the mutators start processing the
   * buffers themselves.
   *
   * There are some interesting cases when G1UseAdaptiveConcRefinement
   * is turned off:
   * 1) green = yellow = red = 0. The mutator processes all buffers,
   *    except for those created by the deferred updates machinery
   *    during a collection.
   * 2) green = 0. No caching is done. This can be a good way to
   *    minimize the amount of time spent updating remembered sets
   *    during a collection.
   * A configuration sketch follows the zone fields below.
   */
  int _green_zone;
  int _yellow_zone;
  int _red_zone;
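
  // A minimal configuration sketch (hypothetical values, not code from
  // this class) for when G1UseAdaptiveConcRefinement is off:
  //
  //   ConcurrentG1Refine* cg1r = ...;
  //   cg1r->set_green_zone(4);    // leave up to 4 buffers for caching
  //   cg1r->set_yellow_zone(16);  // workers ramp up in [4, 16)
  //   cg1r->set_red_zone(32);     // at 32 buffers, mutators refine directly
  //
  // Setting green = yellow = red = 0 instead pushes all refinement work
  // onto the mutators, as described in case 1) above.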

  int _thread_threshold_step;

  // Reset the threshold step value based on the current zone boundaries.
  void reset_threshold_step();
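
  // A hedged sketch (an assumption about how the refinement worker
  // threads consume this value; that logic lives outside this class):
  // worker i might activate once the update buffer queue grows past
  // roughly
  //
  //   green_zone() + (i + 1) * thread_threshold_step()
  //
  // e.g. with green = 4, yellow = 16 and a step of 3, three workers
  // would stagger their activation thresholds at 7, 10 and 13.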

  // The cache for card refinement.
  bool   _use_cache;
  bool   _def_use_cache;

  size_t _n_periods;    // Used as clearing epoch

  // An evicting cache of the number of times each card
  // is accessed. Reduces, but does not eliminate, the amount
  // of duplicated processing of dirty cards.

  enum SomePrivateConstants {
    epoch_bits           = 32,
    card_num_shift       = epoch_bits,
    epoch_mask           = AllBits,
    card_num_mask        = AllBits,

    // The initial cache size is approximately this fraction
    // of a maximal cache (i.e. the size needed for all cards
    // in the heap)
    InitialCacheFraction = 512
  };

  const static julong card_num_mask_in_place =
                        (julong) card_num_mask << card_num_shift;

  typedef struct {
    julong _value;      // |  card_num   |  epoch   |
  } CardEpochCacheEntry;

  julong make_epoch_entry(unsigned int card_num, unsigned int epoch) {
    assert(0 <= card_num && card_num < _max_n_card_counts, "Bounds");
    assert(0 <= epoch && epoch <= _n_periods, "must be");

    return ((julong) card_num << card_num_shift) | epoch;
  }

  unsigned int extract_epoch(julong v) {
    return (v & epoch_mask);
  }

  unsigned int extract_card_num(julong v) {
    return (v & card_num_mask_in_place) >> card_num_shift;
  }
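
  // Worked example (illustrative values): make_epoch_entry(0x2A, 3)
  // packs the card number into the high 32 bits and the epoch into the
  // low 32 bits, yielding 0x0000002A00000003; extract_card_num()
  // recovers 0x2A and extract_epoch() recovers 3.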

  typedef struct {
    unsigned char _count;
    unsigned char _evict_count;
  } CardCountCacheEntry;

  CardCountCacheEntry* _card_counts;
  CardEpochCacheEntry* _card_epochs;

  // The current number of buckets in the card count cache.
  unsigned _n_card_counts;

  // The max number of buckets required for the number of
  // cards for the entire reserved heap.
  unsigned _max_n_card_counts;

  // Possible sizes of the cache: odd primes that roughly double in size.
  // (See jvmtiTagMap.cpp).
  static int _cc_cache_sizes[];

  // The index in _cc_cache_sizes corresponding to the size of
  // _card_counts.
  int _cache_size_index;

  bool _expand_card_counts;

  const jbyte* _ct_bot;

  jbyte**      _hot_cache;
  int          _hot_cache_size;
  int          _n_hot;
  int          _hot_cache_idx;

  int          _hot_cache_par_chunk_size;
  volatile int _hot_cache_par_claimed_idx;

  // Needed to work around 6817995.
  CardTableModRefBS* _ct_bs;
  G1CollectedHeap*   _g1h;

  // Expands the array that holds the card counts to the next size up.
  void expand_card_count_cache();

  // Hash a given key (index of card_ptr) with the specified size.
  static unsigned int hash(size_t key, int size) {
    return (unsigned int) key % size;
  }

  // Hash a given key (index of card_ptr).
  unsigned int hash(size_t key) {
    return hash(key, _n_card_counts);
  }
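
  // For example (hypothetical size): if _n_card_counts were 1009, an
  // odd prime, a card with index 5000 would hash to bucket
  // 5000 % 1009 == 964.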

  // Converts a card table entry pointer into its index ("card number")
  // relative to the bottom of the card table.
  unsigned ptr_2_card_num(jbyte* card_ptr) {
    return (unsigned) (card_ptr - _ct_bot);
  }

  // Converts a card number back into a card table entry pointer.
  jbyte* card_num_2_ptr(unsigned card_num) {
    return (jbyte*) (_ct_bot + card_num);
  }
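
  // The two conversions are inverses: a card_ptr equal to _ct_bot + 100
  // maps to card number 100, and card_num_2_ptr(100) returns the same
  // entry.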

  // Increments the count of the given card; the new count is returned
  // via "count", and "defer" is set if refinement of the card should be
  // deferred. Returns the card whose count was updated, which on a
  // cache collision may differ from card_ptr.
  jbyte* add_card_count(jbyte* card_ptr, int* count, bool* defer);

  // Returns true if this card is in a young region.
  bool is_young_card(jbyte* card_ptr);

 public:
  ConcurrentG1Refine();
  ~ConcurrentG1Refine();

  void init(); // Accomplish some initialization that has to wait.
  void stop();

  void reinitialize_threads();

  // Iterate over the concurrent refinement threads.
  void threads_do(ThreadClosure *tc);

  // If this is the first entry for the slot, writes into the cache and
  // returns NULL. If it causes an eviction, returns the evicted pointer.
  // Otherwise, it's a cache hit, and returns NULL.
  jbyte* cache_insert(jbyte* card_ptr, bool* defer);
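
  // Caller-side sketch (hedged; it follows only the contract documented
  // above, not any particular call site):
  //
  //   bool defer = false;
  //   jbyte* evicted = cg1r->cache_insert(card_ptr, &defer);
  //   if (evicted != NULL) {
  //     // an entry was displaced; process the evicted card instead
  //   }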

  // Process the cached entries.
  void clean_up_cache(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);

  // Set up for parallel processing of the cards in the hot cache.
  void clear_hot_cache_claimed_index() {
    _hot_cache_par_claimed_idx = 0;
  }

  // Discard entries in the hot cache.
  void clear_hot_cache() {
    _hot_cache_idx = 0; _n_hot = 0;
  }

  bool hot_cache_is_empty() { return _n_hot == 0; }

  bool use_cache() { return _use_cache; }
  // Note: enabling the cache restores the configured default
  // (_def_use_cache); it does not unconditionally turn the cache on.
  void set_use_cache(bool b) {
    if (b) _use_cache = _def_use_cache;
    else   _use_cache = false;
  }
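
  // For instance (illustrative): a caller can bypass the cache for a
  // phase and later restore the configured policy:
  //
  //   cg1r->set_use_cache(false);  // always bypass the cache
  //   ...
  //   cg1r->set_use_cache(true);   // back to the _def_use_cache default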

  void clear_and_record_card_counts();

  static int thread_num();

  void print_worker_threads_on(outputStream* st) const;

  void set_green_zone(int x)  { _green_zone = x;  }
  void set_yellow_zone(int x) { _yellow_zone = x; }
  void set_red_zone(int x)    { _red_zone = x;    }

  int green_zone() const      { return _green_zone;  }
  int yellow_zone() const     { return _yellow_zone; }
  int red_zone() const        { return _red_zone;    }

  int total_thread_num() const  { return _n_threads;        }
  int worker_thread_num() const { return _n_worker_threads; }

  int thread_threshold_step() const { return _thread_threshold_step; }
};