/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP

#include "gc/shared/modRefBarrierSet.hpp"
#include "utilities/align.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

// As it currently stands, this barrier is *imprecise*: when a ref field in
// an object "o" is modified, the card table entry for the card containing
// the head of "o" is dirtied, not necessarily the card containing the
// modified field itself.  For object arrays, however, the barrier *is*
// precise; only the card containing the modified element is dirtied.
// Closures used to scan dirty cards should take these
// considerations into account.

class CardTableModRefBS: public ModRefBarrierSet {
  // Some classes get to look at some private stuff.
  friend class VMStructs;
 protected:

  enum CardValues {
    clean_card                  = -1,
    // The mask contains zeros in places for all other values.
    clean_card_mask             = clean_card - 31,

    dirty_card                  =  0,
    precleaned_card             =  1,
    claimed_card                =  2,
    deferred_card               =  4,
    last_card                   =  8,
    CT_MR_BS_last_reserved      = 16
  };
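
  // Editorial note (illustrative, not part of this interface): viewed as
  // signed bytes, clean_card is 0xff (all bits set) and clean_card_mask is
  // 0xe0; every non-clean value above occupies only the low five bits,
  // which are exactly the zeros of the mask.  A hedged sketch of a
  // mask-based cleanliness test built on that arithmetic:
  //
  //   bool card_is_clean(jbyte v) {  // hypothetical helper, for exposition
  //     return (v & clean_card_mask) == clean_card_mask;
  //   }
  //
  // A dirty (0), precleaned (1), claimed (2), deferred (4) or last (8)
  // value leaves the masked bits zero, so only a clean card passes.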

  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
  // or INCLUDE_JVMCI is being used
  bool _defer_initial_card_mark;

  // a word's worth (row) of clean card values
  static const intptr_t clean_card_row = (intptr_t)(-1);

  // The declaration order of these const fields is important; see the
  // constructor before changing.
  const MemRegion _whole_heap;       // the region covered by the card table
  size_t          _guard_index;      // index of very last element in the card
                                     // table; it is set to a guard value
                                     // (last_card) and should never be modified
  size_t          _last_valid_index; // index of the last valid element
  const size_t    _page_size;        // page size used when mapping _byte_map
  size_t          _byte_map_size;    // in bytes
  jbyte*          _byte_map;         // the card marking array

  // Some barrier sets create tables whose elements correspond to parts of
  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
  // normally reserve space for such tables, and commit parts of the table
  // "covering" parts of the heap that are committed.  At most one covered
  // region per generation is needed.
  static const int _max_covered_regions = 2;

  int _cur_covered_regions;

  // The covered regions should be in address order.
  MemRegion* _covered;
  // The committed regions correspond one-to-one to the covered regions.
  // They represent the card-table memory that has been committed to service
  // the corresponding covered region.  The committed region for one covered
  // region may be larger than strictly necessary because of page-size
  // roundings, and may therefore extend onto the card-table space for the
  // next covered region.
  MemRegion* _committed;

  // The last card is a guard card, and we commit the page for it so
  // we can use the card for verification purposes.  We make sure we never
  // uncommit the MemRegion for that page.
  MemRegion _guard_region;

  inline size_t compute_byte_map_size();

  // Finds and returns the index of the region, if any, to which the given
  // region would be contiguous.  If none exists, assigns a new region and
  // returns its index.  Requires that no more than the maximum number of
  // covered regions defined in the constructor are ever in use.
  int find_covering_region_by_base(HeapWord* base);

  // Same as above, but finds the region containing the given address
  // instead of starting at a given base address.
  int find_covering_region_containing(HeapWord* addr);

  // Resize one of the regions covered by the remembered set.
  virtual void resize_covered_region(MemRegion new_region);

  // Returns the leftmost end of a committed region corresponding to a
  // covered region before covered region "ind", or else "NULL" if "ind" is
  // the first covered region.
  HeapWord* largest_prev_committed_end(int ind) const;

  // Returns the part of the region mr that doesn't intersect with
  // any committed region other than self.  Used to prevent uncommitting
  // regions that are also committed by other regions.  Also protects
  // against uncommitting the guard region.
  MemRegion committed_unique_to_self(int self, MemRegion mr) const;

  // Mapping from address to card marking array entry
  jbyte* byte_for(const void* p) const {
    assert(_whole_heap.contains(p),
           "Attempt to access p = " PTR_FORMAT " out of bounds of "
           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
           p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
    jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
    assert(result >= _byte_map && result < _byte_map + _byte_map_size,
           "out of bounds accessor for card marking array");
    return result;
  }

  // The card table byte one after the card marking array
  // entry for argument address.  Typically used for higher bounds
  // for loops iterating through the card table.
  jbyte* byte_after(const void* p) const {
    return byte_for(p) + 1;
  }
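
  // Editorial sketch of the mapping above (hedged; card_shift and card_size
  // are the public constants declared later in this class): with
  // card_shift == 9 each card covers 512 heap bytes, and for any address p
  // in the covered heap
  //
  //   byte_for(p) == &byte_map_base[uintptr_t(p) >> 9]
  //
  // so p and p + 511 map to the same entry whenever p is card-aligned.
  // byte_map_base is biased (see its declaration below): it is the address
  // the 0th entry *would* have if the heap started at address zero, which
  // lets the shift be applied to the raw pointer with no subtraction of
  // the heap's base address.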

  // Dirty the bytes corresponding to "mr" (not all of which must be
  // covered.)
  void dirty_MemRegion(MemRegion mr);

  // Clear (to clean_card) the bytes entirely contained within "mr" (not
  // all of which must be covered.)
  void clear_MemRegion(MemRegion mr);

 public:
  // Constants
  enum SomePublicConstants {
    card_shift                  = 9,
    card_size                   = 1 << card_shift,
    card_size_in_words          = card_size / sizeof(HeapWord)
  };

  static int clean_card_val()      { return clean_card; }
  static int clean_card_mask_val() { return clean_card_mask; }
  static int dirty_card_val()      { return dirty_card; }
  static int claimed_card_val()    { return claimed_card; }
  static int precleaned_card_val() { return precleaned_card; }
  static int deferred_card_val()   { return deferred_card; }

  virtual void initialize();

  // *** Barrier set functions.

  // Initialization utilities; covered_words is the size of the covered
  // region in words.
  inline size_t cards_required(size_t covered_words) {
    // Add one for a guard card, used to detect errors.
    const size_t words = align_up(covered_words, card_size_in_words);
    return words / card_size_in_words + 1;
  }

 protected:
  CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
  ~CardTableModRefBS();

 public:
  void write_region(MemRegion mr) {
    dirty_MemRegion(mr);
  }

 protected:
  void write_ref_array_work(MemRegion mr) {
    dirty_MemRegion(mr);
  }

 public:
  bool is_aligned(HeapWord* addr) {
    return is_card_aligned(addr);
  }

  // *** Card-table-barrier-specific things.

  // Record a reference update.  Note that these versions are precise!
  // The scanning code has to handle the fact that the write barrier may be
  // either precise or imprecise.  We make non-virtual inline variants of
  // these functions here for performance.
  template <DecoratorSet decorators, typename T>
  void write_ref_field_post(T* field, oop newVal);

  // These are used by G1, when it uses the card table as a temporary data
  // structure for card claiming.
  bool is_card_dirty(size_t card_index) {
    return _byte_map[card_index] == dirty_card_val();
  }

  void mark_card_dirty(size_t card_index) {
    _byte_map[card_index] = dirty_card_val();
  }

  bool is_card_clean(size_t card_index) {
    return _byte_map[card_index] == clean_card_val();
  }

  // Card marking array base (adjusted for heap low boundary)
  // This would be the 0th element of _byte_map, if the heap started at 0x0.
  // But since the heap starts at some higher address, this points to somewhere
  // before the beginning of the actual _byte_map.
  jbyte* byte_map_base;

  // Return true if "p" is at the start of a card.
  bool is_card_aligned(HeapWord* p) {
    jbyte* pcard = byte_for(p);
    return (addr_for(pcard) == p);
  }

  HeapWord* align_to_card_boundary(HeapWord* p) {
    jbyte* pcard = byte_for(p + card_size_in_words - 1);
    return addr_for(pcard);
  }

  // The kinds of precision a CardTableModRefBS may offer.
  enum PrecisionStyle {
    Precise,
    ObjHeadPreciseArray
  };

  // Tells what style of precision this card table offers.
  PrecisionStyle precision() {
    return ObjHeadPreciseArray; // Only one supported for now.
  }

  // ModRefBS functions.
  virtual void invalidate(MemRegion mr);
  void clear(MemRegion mr);
  void dirty(MemRegion mr);

  // *** Card-table-RemSet-specific things.

  static uintx ct_max_alignment_constraint();

  // Apply closure "cl" to the dirty cards containing some part of
  // MemRegion "mr".
  void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);
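
  // Editorial sketch (hedged; a conceptual outline, not the actual
  // implementation): a dirty-card scan walks the table bytes for "mr" and
  // hands each maximal run of dirty cards back to the closure as a
  // MemRegion:
  //
  //   jbyte* limit = byte_after(mr.last());
  //   for (jbyte* cur = byte_for(mr.start()); cur < limit; ) {
  //     if (*cur == dirty_card_val()) {
  //       jbyte* run_start = cur;
  //       while (cur < limit && *cur == dirty_card_val()) cur++;
  //       cl->do_MemRegion(MemRegion(addr_for(run_start), addr_for(cur)));
  //     } else {
  //       cur++;
  //     }
  //   }
  //
  // Scanning closures must allow for the imprecision described in the
  // class comment: a dirty card may cover only the head of the object
  // whose field was actually updated.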

  // Return the MemRegion corresponding to the first maximal run
  // of dirty cards lying completely within MemRegion mr.
  // If reset is "true", sets those card table entries to the given
  // reset value.
  MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
                                         int reset_val);

  // Provide read-only access to the card table array.
  const jbyte* byte_for_const(const void* p) const {
    return byte_for(p);
  }
  const jbyte* byte_after_const(const void* p) const {
    return byte_after(p);
  }

  // Mapping from card marking array entry to address of first word
  HeapWord* addr_for(const jbyte* p) const {
    assert(p >= _byte_map && p < _byte_map + _byte_map_size,
           "out of bounds access to card marking array. p: " PTR_FORMAT
           " _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT,
           p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size));
    size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
    HeapWord* result = (HeapWord*) (delta << card_shift);
    assert(_whole_heap.contains(result),
           "Returning result = " PTR_FORMAT " out of bounds of "
           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
           p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
    return result;
  }

  // Mapping from address to card marking array index.
  size_t index_for(void* p) {
    assert(_whole_heap.contains(p),
           "Attempt to access p = " PTR_FORMAT " out of bounds of "
           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
           p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
    return byte_for(p) - _byte_map;
  }

  const jbyte* byte_for_index(const size_t card_index) const {
    return _byte_map + card_index;
  }

  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const;

  void verify();
  void verify_guard();

  // If val_equals is true, verify that all cards covered by mr equal val;
  // otherwise, verify that none of them do.
  void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
  void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
  void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;

  // ReduceInitialCardMarks
  void initialize_deferred_card_mark_barriers();

  // If the CollectedHeap was asked to defer a store barrier above,
  // this informs it to flush such a deferred store barrier to the
  // remembered set.
  void flush_deferred_card_mark_barrier(JavaThread* thread);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.  If such permission
  // is granted for this heap type, the compiler promises to call
  // defer_store_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided.  G1, like CMS, allows this, but should be
  // ready to provide a compensating write barrier as necessary
  // if that storage came out of a non-young region.  The efficiency
  // of this implementation depends crucially on being able to
  // answer very efficiently in constant time whether a piece of
  // storage in the heap comes from a young region or not.
  // See ReduceInitialCardMarks.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }
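
  // Editorial sketch of the deferral protocol above (hedged; the body is a
  // paraphrase for exposition, not the actual runtime code): with
  // ReduceInitialCardMarks the compiler elides the card marks for the
  // initializing stores of a new object, and the runtime compensates on
  // the slow allocation path roughly as follows:
  //
  //   void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
  //     flush_deferred_card_mark_barrier(thread); // retire any older mark
  //     if (!is_in_young(new_obj)) {
  //       // Remember new_obj's region so its cards can be dirtied at the
  //       // next slow-path allocation or GC-related safepoint when
  //       // card_mark_must_follow_store() demands strict ordering, or
  //       // dirty the cards immediately when it does not.
  //     }
  //   }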

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // we will be informed of a slow-path allocation by a call
  // to on_slowpath_allocation_exit() below.  Such a call precedes the
  // initialization of the object itself, and no post-store-barriers will
  // be issued.  Some heap types require that the barrier strictly follows
  // the initializing stores.  (This is currently implemented by deferring the
  // barrier until the next slow-path allocation or gc-related safepoint.)
  // This interface answers whether a particular barrier type needs the card
  // mark to be thus strictly sequenced after the stores.
  virtual bool card_mark_must_follow_store() const = 0;

  virtual bool is_in_young(oop obj) const = 0;

  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
  virtual void flush_deferred_barriers(JavaThread* thread);

  virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }

  template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
  class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
};

template<>
struct BarrierSet::GetName<CardTableModRefBS> {
  static const BarrierSet::Name value = BarrierSet::CardTableModRef;
};

template<>
struct BarrierSet::GetType<BarrierSet::CardTableModRef> {
  typedef CardTableModRefBS type;
};

#endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP