/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_CARDTABLERS_HPP
#define SHARE_VM_GC_SHARED_CARDTABLERS_HPP

#include "gc/shared/cardTable.hpp"
#include "memory/memRegion.hpp"
#include "oops/oop.hpp"

class DirtyCardToOopClosure;
class Generation;
class Space;
class OopsInGenClosure;

// Helper to remember modified oops in all clds.
class CLDRemSet {
  // When true, CLD mod-union information is accumulated rather than
  // discarded (consumed by mod_union_is_clear()/clear_mod_union(),
  // defined in the .cpp file).
  bool _accumulate_modified_oops;
 public:
  CLDRemSet() : _accumulate_modified_oops(false) {}
  // Enable/disable accumulation of modified oops in the CLDs.
  void set_accumulate_modified_oops(bool value) { _accumulate_modified_oops = value; }
  bool accumulate_modified_oops() { return _accumulate_modified_oops; }
  // Returns true iff no CLD currently has modified oops recorded.
  bool mod_union_is_clear();
  // Resets the modified-oops state in all CLDs.
  void clear_mod_union();
};

// This RemSet uses a card table both as shared data structure
// for a mod ref barrier set and for the rem set information.

class CardTableRS: public CardTable {
  friend class VMStructs;
  // Below are private classes used in impl.
  friend class VerifyCTSpaceClosure;
  friend class ClearNoncleanCardWrapper;

  // Remembered-set helper for modified oops in all CLDs.
  CLDRemSet _cld_rem_set;

  // Verifies the card-table entries covering space "s" of the generation
  // starting at "gen_start"; implementation in the .cpp file.
  void verify_space(Space* s, HeapWord* gen_start);

  // Card values beyond those reserved by the base CardTable.
  enum ExtendedCardValue {
    youngergen_card   = CT_MR_BS_last_reserved + 1,
    // These are for parallel collection.
    // There are three P (parallel) youngergen card values.  In general, this
    // needs to be more than the number of generations (including the perm
    // gen) that might have younger_refs_do invoked on them separately.  So
    // if we add more gens, we have to add more values.
    youngergenP1_card  = CT_MR_BS_last_reserved + 2,
    youngergenP2_card  = CT_MR_BS_last_reserved + 3,
    youngergenP3_card  = CT_MR_BS_last_reserved + 4,
    cur_youngergen_and_prev_nonclean_card =
      CT_MR_BS_last_reserved + 5
  };

  // An array that contains, for each generation, the card table value last
  // used as the current value for a younger_refs_do iteration of that
  // portion of the table. The perm gen is index 0. The young gen is index 1,
  // but will always have the value "clean_card". The old gen is index 2.
  jbyte* _last_cur_val_in_gen;

  // The card value currently used to mark younger-gen cards; see
  // find_unused_youngergenP_card_value() for how a fresh value is chosen.
  jbyte _cur_youngergen_card_val;

  // Number of generations, plus one for lingering PermGen issues in CardTableRS.
  static const int _regions_to_iterate = 3;

  jbyte cur_youngergen_card_val() {
    return _cur_youngergen_card_val;
  }
  void set_cur_youngergen_card_val(jbyte v) {
    _cur_youngergen_card_val = v;
  }
  // True iff "v" is a youngergen card value from a previous iteration
  // (i.e. in the youngergen range but not the current value).
  bool is_prev_youngergen_card_val(jbyte v) {
    return
      youngergen_card <= v &&
      v < cur_youngergen_and_prev_nonclean_card &&
      v != _cur_youngergen_card_val;
  }
  // Return a youngergen_card_value that is not currently in use.
  jbyte find_unused_youngergenP_card_value();

 public:
  CardTableRS(MemRegion whole_heap);
  ~CardTableRS();

  // Accessor for the CLD remembered set (never NULL; points at a member).
  CLDRemSet* cld_rem_set() { return &_cld_rem_set; }

  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

  // Override.
  void prepare_for_younger_refs_iterate(bool parallel);

  // Card table entries are cleared before application; "blk" is
  // responsible for dirtying if the oop is still older-to-younger after
  // closure application.
  void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads);

  // Non-virtual fast path: marks the card covering "field" as a
  // younger-gen card.  "new_val" is unused here but kept for signature
  // parity with the virtual variants.
  void inline_write_ref_field_gc(void* field, oop new_val) {
    jbyte* byte = byte_for(field);
    *byte = youngergen_card;
  }
  void write_ref_field_gc_work(void* field, oop new_val) {
    inline_write_ref_field_gc(field, new_val);
  }

  // Override.  Might want to devirtualize this in the same fashion as
  // above.  Ensures that the value of the card for field says that it's
  // a younger card in the current collection.
  virtual void write_ref_field_gc_par(void* field, oop new_val);

  // True iff "addr" falls on a card boundary.
  bool is_aligned(HeapWord* addr) {
    return is_card_aligned(addr);
  }

  void verify();
  void initialize();

  void clear_into_younger(Generation* old_gen);

  void invalidate_or_clear(Generation* old_gen);

  // True iff "v" is any non-clean value from a previous iteration.
  // Note: unlike is_prev_youngergen_card_val() this range is inclusive of
  // cur_youngergen_and_prev_nonclean_card.
  bool is_prev_nonclean_card_val(jbyte v) {
    return
      youngergen_card <= v &&
      v <= cur_youngergen_and_prev_nonclean_card &&
      v != _cur_youngergen_card_val;
  }

  static bool youngergen_may_have_been_dirty(jbyte cv) {
    return cv == CardTableRS::cur_youngergen_and_prev_nonclean_card;
  }

  // *** Support for parallel card scanning.

  // dirty and precleaned are equivalent wrt younger_refs_iter.
  static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
    return cv == dirty_card || cv == precleaned_card;
  }

  // Returns "true" iff the value "cv" will cause the card containing it
  // to be scanned in the current traversal.  May be overridden by
  // subtypes.
  bool card_will_be_scanned(jbyte cv);

  // Returns "true" iff the value "cv" may have represented a dirty card at
  // some point.
  bool card_may_have_been_dirty(jbyte cv);

  // Iterate over the portion of the card-table which covers the given
  // region mr in the given space and apply cl to any dirty sub-regions
  // of mr. Clears the dirty cards as they are processed.
  void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
                                                OopsInGenClosure* cl, CardTableRS* ct,
                                                uint n_threads);

  // Work method used to implement non_clean_card_iterate_possibly_parallel()
  // above in the parallel case.
  void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                            OopsInGenClosure* cl, CardTableRS* ct,
                                            uint n_threads);

  // This is an array, one element per covered region of the card table.
  // Each entry is itself an array, with one element per chunk in the
  // covered region.  Each entry of these arrays is the lowest non-clean
  // card of the corresponding chunk containing part of an object from the
  // previous chunk, or else NULL.
  typedef jbyte* CardPtr;
  typedef CardPtr* CardArr;
  CardArr* _lowest_non_clean;
  size_t* _lowest_non_clean_chunk_size;
  uintptr_t* _lowest_non_clean_base_chunk_index;
  // NOTE(review): presumably records, per covered region, the collection
  // at which the LNC arrays were last resized (used for the concurrent
  // resizing in get_LNC_array_for_space) -- confirm against the .cpp.
  volatile int* _last_LNC_resizing_collection;

  // Initializes "lowest_non_clean" to point to the array for the region
  // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
  // index corresponding to the first element of that array.
  // Ensures that these arrays are of sufficient size, allocating if necessary.
  // May be called by several threads concurrently.
  void get_LNC_array_for_space(Space* sp,
                               jbyte**& lowest_non_clean,
                               uintptr_t& lowest_non_clean_base_chunk_index,
                               size_t& lowest_non_clean_chunk_size);

  // Returns the number of chunks necessary to cover "mr".
  size_t chunks_to_cover(MemRegion mr) {
    return (size_t)(addr_to_chunk_index(mr.last()) -
                    addr_to_chunk_index(mr.start()) + 1);
  }

  // Returns the index of the chunk in a stride which
  // covers the given address.
  uintptr_t addr_to_chunk_index(const void* addr) {
    uintptr_t card = (uintptr_t) byte_for(addr);
    return card / ParGCCardsPerStrideChunk;
  }

  // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
  // to the cards in the stride (of n_strides) within the given space.
  void process_stride(Space* sp,
                      MemRegion used,
                      jint stride, int n_strides,
                      OopsInGenClosure* cl,
                      CardTableRS* ct,
                      jbyte** lowest_non_clean,
                      uintptr_t lowest_non_clean_base_chunk_index,
                      size_t lowest_non_clean_chunk_size);

  // Makes sure that chunk boundaries are handled appropriately, by
  // adjusting the min_done of dcto_cl, and by using a special card-table
  // value to indicate how min_done should be set.
  void process_chunk_boundaries(Space* sp,
                                DirtyCardToOopClosure* dcto_cl,
                                MemRegion chunk_mr,
                                MemRegion used,
                                jbyte** lowest_non_clean,
                                uintptr_t lowest_non_clean_base_chunk_index,
                                size_t lowest_non_clean_chunk_size);

  virtual bool is_in_young(oop obj) const;

};

// MemRegionClosure used by CardTableRS (see friend declaration above):
// for each card covering the region it resets non-clean card values and,
// where processing is still required, hands the sub-region to the
// wrapped DirtyCardToOopClosure.
class ClearNoncleanCardWrapper: public MemRegionClosure {
  DirtyCardToOopClosure* _dirty_card_closure;
  CardTableRS* _ct;
  // True when running as part of a parallel scan; selects between the
  // serial and parallel clear_card work methods.
  bool _is_par;
private:
  // Clears the given card, return true if the corresponding card should be
  // processed.
  inline bool clear_card(jbyte* entry);
  // Work methods called by clear_card().
  inline bool clear_card_serial(jbyte* entry);
  inline bool clear_card_parallel(jbyte* entry);
  // Check whether "entry" is word aligned.
  bool is_word_aligned(jbyte* entry);

public:
  ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par);
  void do_MemRegion(MemRegion mr);
};

#endif // SHARE_VM_GC_SHARED_CARDTABLERS_HPP