/*
 * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)

// As it currently stands, this barrier is *imprecise*: when a ref field in
// an object "o" is modified, the card table entry for the card containing
// the head of "o" is dirtied, not necessarily the card containing the
// modified field itself.  For object arrays, however, the barrier *is*
// precise; only the card containing the modified element is dirtied.
// Any MemRegionClosures used to scan dirty cards should take these
// considerations into account.

class Generation;
class OopsInGenClosure;
class DirtyCardToOopClosure;

class CardTableModRefBS: public ModRefBarrierSet {
  // Some classes get to look at some private stuff.
  friend class BytecodeInterpreter;
  friend class VMStructs;
  friend class CardTableRS;
  friend class CheckForUnmarkedOops; // Needs access to raw card bytes.
  friend class SharkBuilder;
#ifndef PRODUCT
  // For debugging.
  friend class GuaranteeNotModClosure;
#endif
 protected:

  enum CardValues {
    clean_card              = -1,
    // The mask contains zeros in places for all other values.
    clean_card_mask         = clean_card - 31,

    dirty_card              =  0,
    precleaned_card         =  1,
    claimed_card            =  2,
    deferred_card           =  4,
    last_card               =  8,
    CT_MR_BS_last_reserved  = 16
  };

  // dirty and precleaned are equivalent wrt younger_refs_iter.
  static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
    return cv == dirty_card || cv == precleaned_card;
  }

  // Returns "true" iff the value "cv" will cause the card containing it
  // to be scanned in the current traversal.  May be overridden by
  // subtypes.
  virtual bool card_will_be_scanned(jbyte cv) {
    return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv);
  }

  // Returns "true" iff the value "cv" may have represented a dirty card at
  // some point.
  virtual bool card_may_have_been_dirty(jbyte cv) {
    return card_is_dirty_wrt_gen_iter(cv);
  }
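  // For example: as raw bytes, clean_card is 0xff and clean_card_mask is
  // 0xe0, so a clean card has its three high bits set while the dirty,
  // precleaned, claimed and deferred values do not.  This is what lets
  // is_card_claimed() and is_card_deferred() below test
  //   (val & (clean_card_mask | claimed_card)) == claimed_card
  // and get "false" for a clean card but "true" for a card carrying the
  // claimed bit.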
  // The declaration order of these const fields is important; see the
  // constructor before changing.
  const MemRegion _whole_heap;       // the region covered by the card table
  const size_t    _guard_index;      // index of very last element in the card
                                     // table; it is set to a guard value
                                     // (last_card) and should never be modified
  const size_t    _last_valid_index; // index of the last valid element
  const size_t    _page_size;        // page size used when mapping _byte_map
  const size_t    _byte_map_size;    // in bytes
  jbyte*          _byte_map;         // the card marking array

  int _cur_covered_regions;
  // The covered regions should be in address order.
  MemRegion* _covered;
  // The committed regions correspond one-to-one to the covered regions.
  // They represent the card-table memory that has been committed to service
  // the corresponding covered region.  The committed region for a covered
  // region may be larger because of page-size rounding; thus, a committed
  // region for one covered region may actually extend onto the card-table
  // space for the next covered region.
  MemRegion* _committed;

  // The last card is a guard card, and we commit the page for it so
  // we can use the card for verification purposes.  We make sure we never
  // uncommit the MemRegion for that page.
  MemRegion _guard_region;

 protected:
  // Initialization utilities; covered_words is the size of the covered region
  // in words.
  inline size_t cards_required(size_t covered_words);
  inline size_t compute_byte_map_size();

  // Finds and returns the index of the region, if any, to which the given
  // region would be contiguous.  If none exists, assigns a new region and
  // returns its index.  Requires that no more than the maximum number of
  // covered regions defined in the constructor are ever in use.
  int find_covering_region_by_base(HeapWord* base);

  // Same as above, but finds the region containing the given address
  // instead of starting at a given base address.
  int find_covering_region_containing(HeapWord* addr);

  // Resize one of the regions covered by the remembered set.
  void resize_covered_region(MemRegion new_region);

  // Returns the leftmost end of a committed region corresponding to a
  // covered region before covered region "ind", or else "NULL" if "ind" is
  // the first covered region.
  HeapWord* largest_prev_committed_end(int ind) const;

  // Returns the part of the region "mr" that doesn't intersect with
  // any committed region other than "self".  Used to prevent uncommitting
  // regions that are also committed by other regions.  Also protects
  // against uncommitting the guard region.
  MemRegion committed_unique_to_self(int self, MemRegion mr) const;

  // Mapping from address to card marking array entry.
  jbyte* byte_for(const void* p) const {
    assert(_whole_heap.contains(p),
           "out of bounds access to card marking array");
    jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
    assert(result >= _byte_map && result < _byte_map + _byte_map_size,
           "out of bounds accessor for card marking array");
    return result;
  }
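  // For example, with 512-byte cards: if the heap starts at 0x20000000 and
  // byte_map_base has been biased so that &byte_map_base[0x20000000 >> 9]
  // is &_byte_map[0], then byte_for((void*)0x20000200) returns &_byte_map[1],
  // the entry for the card covering heap addresses [0x20000200, 0x20000400).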
  // The card table byte one after the card marking array
  // entry for argument address.  Typically used for higher bounds
  // for loops iterating through the card table.
  jbyte* byte_after(const void* p) const {
    return byte_for(p) + 1;
  }

  // Iterate over the portion of the card table which covers the given
  // region "mr" in the given space and apply "cl" to any dirty sub-regions
  // of "mr".  "cl" and "dcto_cl" must either be the same closure or "cl"
  // must wrap "dcto_cl".  Both are required - neither may be NULL.  Also,
  // "dcto_cl" may be modified.  Note that this function will operate in a
  // parallel mode if worker threads are available.
  void non_clean_card_iterate(Space* sp, MemRegion mr,
                              DirtyCardToOopClosure* dcto_cl,
                              MemRegionClosure* cl,
                              bool clear);

  // Utility function used to implement the other versions below.
  void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl,
                                   bool clear);

  void par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
                                       DirtyCardToOopClosure* dcto_cl,
                                       MemRegionClosure* cl,
                                       bool clear,
                                       int n_threads);

  // Dirty the bytes corresponding to "mr" (not all of which must be
  // covered.)
  void dirty_MemRegion(MemRegion mr);

  // Clear (to clean_card) the bytes entirely contained within "mr" (not
  // all of which must be covered.)
  void clear_MemRegion(MemRegion mr);

  // *** Support for parallel card scanning.

  enum SomeConstantsForParallelism {
    StridesPerThread    = 2,
    CardsPerStrideChunk = 256
  };

  // This is an array, one element per covered region of the card table.
  // Each entry is itself an array, with one element per chunk in the
  // covered region.  Each entry of these arrays is the lowest non-clean
  // card of the corresponding chunk containing part of an object from the
  // previous chunk, or else NULL.
  typedef jbyte*   CardPtr;
  typedef CardPtr* CardArr;
  CardArr*   _lowest_non_clean;
  size_t*    _lowest_non_clean_chunk_size;
  uintptr_t* _lowest_non_clean_base_chunk_index;
  int*       _last_LNC_resizing_collection;

  // Initializes "lowest_non_clean" to point to the array for the region
  // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
  // index corresponding to the first element of that array.
  // Ensures that these arrays are of sufficient size, allocating if necessary.
  // May be called by several threads concurrently.
  void get_LNC_array_for_space(Space* sp,
                               jbyte**& lowest_non_clean,
                               uintptr_t& lowest_non_clean_base_chunk_index,
                               size_t& lowest_non_clean_chunk_size);

  // Returns the number of chunks necessary to cover "mr".
  size_t chunks_to_cover(MemRegion mr) {
    return (size_t)(addr_to_chunk_index(mr.last()) -
                    addr_to_chunk_index(mr.start()) + 1);
  }

  // Returns the index of the chunk in a stride which
  // covers the given address.
  uintptr_t addr_to_chunk_index(const void* addr) {
    uintptr_t card = (uintptr_t) byte_for(addr);
    return card / CardsPerStrideChunk;
  }
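  // For example, with CardsPerStrideChunk == 256 and 512-byte cards, one
  // chunk covers 256 * 512 = 128K of heap, so chunks_to_cover() of a 1M
  // region returns 8 or 9 depending on how the region aligns with chunk
  // boundaries; process_stride() below then walks the chunks belonging to
  // one of "n_strides" interleaved strides.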
  // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
  // to the cards in the stride (of n_strides) within the given space.
  void process_stride(Space* sp,
                      MemRegion used,
                      jint stride, int n_strides,
                      DirtyCardToOopClosure* dcto_cl,
                      MemRegionClosure* cl,
                      bool clear,
                      jbyte** lowest_non_clean,
                      uintptr_t lowest_non_clean_base_chunk_index,
                      size_t lowest_non_clean_chunk_size);

  // Makes sure that chunk boundaries are handled appropriately, by
  // adjusting the min_done of dcto_cl, and by using a special card-table
  // value to indicate how min_done should be set.
  void process_chunk_boundaries(Space* sp,
                                DirtyCardToOopClosure* dcto_cl,
                                MemRegion chunk_mr,
                                MemRegion used,
                                jbyte** lowest_non_clean,
                                uintptr_t lowest_non_clean_base_chunk_index,
                                size_t lowest_non_clean_chunk_size);

 public:
  // Constants
  enum SomePublicConstants {
    card_shift         = 9,
    card_size          = 1 << card_shift,
    card_size_in_words = card_size / sizeof(HeapWord)
  };

  static int clean_card_val()      { return clean_card; }
  static int clean_card_mask_val() { return clean_card_mask; }
  static int dirty_card_val()      { return dirty_card; }
  static int claimed_card_val()    { return claimed_card; }
  static int precleaned_card_val() { return precleaned_card; }
  static int deferred_card_val()   { return deferred_card; }

  // For RTTI simulation.
  bool is_a(BarrierSet::Name bsn) {
    return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn);
  }

  CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);

  // *** Barrier set functions.

  bool has_write_ref_pre_barrier() { return false; }

  inline bool write_ref_needs_barrier(void* field, oop new_val) {
    // Note that this assumes the perm gen is the highest generation
    // in the address space.
    return new_val != NULL && !new_val->is_perm();
  }

  // Record a reference update.  Note that these versions are precise!
  // The scanning code has to handle the fact that the write barrier may be
  // either precise or imprecise.  We make non-virtual inline variants of
  // these functions here for performance.
 protected:
  void write_ref_field_work(oop obj, size_t offset, oop newVal);
  virtual void write_ref_field_work(void* field, oop newVal);
 public:

  bool has_write_ref_array_opt() { return true; }
  bool has_write_region_opt() { return true; }

  inline void inline_write_region(MemRegion mr) {
    dirty_MemRegion(mr);
  }
 protected:
  void write_region_work(MemRegion mr) {
    inline_write_region(mr);
  }
 public:

  inline void inline_write_ref_array(MemRegion mr) {
    dirty_MemRegion(mr);
  }
 protected:
  void write_ref_array_work(MemRegion mr) {
    inline_write_ref_array(mr);
  }
 public:

  bool is_aligned(HeapWord* addr) {
    return is_card_aligned(addr);
  }

  // *** Card-table-barrier-specific things.

  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {}

  template <class T> inline void inline_write_ref_field(T* field, oop newVal) {
    jbyte* byte = byte_for((void*)field);
    *byte = dirty_card;
  }
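  // For example, after a store of the form "obj->field = new_val", the
  // post-write barrier performs the equivalent of
  //   jbyte* card = byte_map_base + ((uintptr_t)&obj->field >> card_shift);
  //   *card = dirty_card;
  // which is what inline_write_ref_field() above does via byte_for().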
  // These are used by G1, when it uses the card table as a temporary data
  // structure for card claiming.
  bool is_card_dirty(size_t card_index) {
    return _byte_map[card_index] == dirty_card_val();
  }

  void mark_card_dirty(size_t card_index) {
    _byte_map[card_index] = dirty_card_val();
  }

  bool is_card_claimed(size_t card_index) {
    jbyte val = _byte_map[card_index];
    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
  }

  void set_card_claimed(size_t card_index) {
    jbyte val = _byte_map[card_index];
    if (val == clean_card_val()) {
      val = (jbyte)claimed_card_val();
    } else {
      val |= (jbyte)claimed_card_val();
    }
    _byte_map[card_index] = val;
  }

  bool claim_card(size_t card_index);

  bool is_card_clean(size_t card_index) {
    return _byte_map[card_index] == clean_card_val();
  }

  bool is_card_deferred(size_t card_index) {
    jbyte val = _byte_map[card_index];
    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
  }

  bool mark_card_deferred(size_t card_index);

  // Card marking array base (adjusted for heap low boundary).
  // This would be the 0th element of _byte_map, if the heap started at 0x0.
  // But since the heap starts at some higher address, this points to somewhere
  // before the beginning of the actual _byte_map.
  jbyte* byte_map_base;

  // Return true if "p" is at the start of a card.
  bool is_card_aligned(HeapWord* p) {
    jbyte* pcard = byte_for(p);
    return (addr_for(pcard) == p);
  }

  // The kinds of precision a CardTableModRefBS may offer.
  enum PrecisionStyle {
    Precise,
    ObjHeadPreciseArray
  };

  // Tells what style of precision this card table offers.
  PrecisionStyle precision() {
    return ObjHeadPreciseArray; // Only one supported for now.
  }

  // ModRefBS functions.
  virtual void invalidate(MemRegion mr, bool whole_heap = false);
  void clear(MemRegion mr);
  void dirty(MemRegion mr);
  void mod_oop_in_space_iterate(Space* sp, OopClosure* cl,
                                bool clear = false,
                                bool before_save_marks = false);

  // *** Card-table-RemSet-specific things.

  // Invoke "cl.do_MemRegion" on a set of MemRegions that collectively
  // includes all the modified cards (expressing each card as a
  // MemRegion).  Thus, several modified cards may be lumped into one
  // region.  The regions are non-overlapping, and are visited in
  // *decreasing* address order.  (This order aids with imprecise card
  // marking, where a dirty card may cause scanning, and summarization
  // marking, of objects that extend onto subsequent cards.)
  // If "clear" is true, the card is (conceptually) marked unmodified before
  // applying the closure.
  void mod_card_iterate(MemRegionClosure* cl, bool clear = false) {
    non_clean_card_iterate_work(_whole_heap, cl, clear);
  }

  // Like the "mod_card_iterate" above, except only invokes the closure
  // for cards within the MemRegion "mr" (which is required to be
  // card-aligned and a multiple of the card size).
  void mod_card_iterate(MemRegion mr, MemRegionClosure* cl,
                        bool clear = false) {
    non_clean_card_iterate_work(mr, cl, clear);
  }

  static uintx ct_max_alignment_constraint();

  // Apply closure "cl" to the dirty cards containing some part of
  // MemRegion "mr".
  void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);
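  // For example, if cards 7, 8 and 9 are dirty but card 10 is clean,
  // mod_card_iterate() above may hand the closure one MemRegion spanning
  // cards 7 through 9 (3 * card_size bytes) rather than three one-card
  // regions.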
  // Return the MemRegion corresponding to the first maximal run
  // of dirty cards lying completely within MemRegion mr.
  // If reset is "true", then sets those card table entries to the given
  // value.
  MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
                                         int reset_val);

  // Set all the dirty cards in the given region to precleaned state.
  void preclean_dirty_cards(MemRegion mr);

  // Provide read-only access to the card table array.
  const jbyte* byte_for_const(const void* p) const {
    return byte_for(p);
  }
  const jbyte* byte_after_const(const void* p) const {
    return byte_after(p);
  }

  // Mapping from card marking array entry to address of first word.
  HeapWord* addr_for(const jbyte* p) const {
    assert(p >= _byte_map && p < _byte_map + _byte_map_size,
           "out of bounds access to card marking array");
    size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
    HeapWord* result = (HeapWord*) (delta << card_shift);
    assert(_whole_heap.contains(result),
           "out of bounds accessor from card marking array");
    return result;
  }

  // Mapping from address to card marking array index.
  size_t index_for(void* p) {
    assert(_whole_heap.contains(p),
           "out of bounds access to card marking array");
    return byte_for(p) - _byte_map;
  }

  const jbyte* byte_for_index(const size_t card_index) const {
    return _byte_map + card_index;
  }

  void verify();
  void verify_guard();

  void verify_clean_region(MemRegion mr) PRODUCT_RETURN;
  void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;

  static size_t par_chunk_heapword_alignment() {
    return CardsPerStrideChunk * card_size_in_words;
  }

};

class CardTableRS;

// A specialization for the CardTableRS gen rem set.
class CardTableModRefBSForCTRS: public CardTableModRefBS {
  CardTableRS* _rs;
 protected:
  bool card_will_be_scanned(jbyte cv);
  bool card_may_have_been_dirty(jbyte cv);
 public:
  CardTableModRefBSForCTRS(MemRegion whole_heap,
                           int max_covered_regions) :
    CardTableModRefBS(whole_heap, max_covered_regions) {}

  void set_CTRS(CardTableRS* rs) { _rs = rs; }
};
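// A minimal sketch of the intended wiring, assuming (as the name and
// set_CTRS() suggest) that the owning CardTableRS constructs this barrier
// set and registers itself so that the overriding card_will_be_scanned()
// and card_may_have_been_dirty() can consult it:
//
//   CardTableModRefBSForCTRS* bs =
//     new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
//   bs->set_CTRS(this);  // "this" being the CardTableRS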