/*
 * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

// As it currently stands, this barrier is *imprecise*: when a ref field in
// an object "o" is modified, the card table entry for the card containing
// the head of "o" is dirtied, not necessarily the card containing the
// modified field itself.  For object arrays, however, the barrier *is*
// precise; only the card containing the modified element is dirtied.
// Any MemRegionClosures used to scan dirty cards should take these
// considerations into account.

class Generation;
class OopsInGenClosure;
class DirtyCardToOopClosure;

class CardTableModRefBS: public ModRefBarrierSet {
  // Some classes get to look at some private stuff.
  friend class BytecodeInterpreter;
  friend class VMStructs;
  friend class CardTableRS;
  friend class CheckForUnmarkedOops; // Needs access to raw card bytes.
#ifndef PRODUCT
  // For debugging.
  friend class GuaranteeNotModClosure;
#endif
protected:

  enum CardValues {
    clean_card       = -1,
    // The mask contains zeros in the bit positions used by all the
    // other card values below.
    clean_card_mask  = clean_card - 31,

    dirty_card       =  0,
    precleaned_card  =  1,
    claimed_card     =  2,
    deferred_card    =  4,
    last_card        =  8,
    CT_MR_BS_last_reserved = 16
  };

  // dirty and precleaned are equivalent wrt younger_refs_iter.
  static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
    return cv == dirty_card || cv == precleaned_card;
  }

  // Returns "true" iff the value "cv" will cause the card containing it
  // to be scanned in the current traversal.  May be overridden by
  // subtypes.
  virtual bool card_will_be_scanned(jbyte cv) {
    return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv);
  }

  // Returns "true" iff the value "cv" may have represented a dirty card at
  // some point.
  virtual bool card_may_have_been_dirty(jbyte cv) {
    return card_is_dirty_wrt_gen_iter(cv);
  }
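  // Worked example of the encoding above (illustrative only): clean_card
  // is -1, i.e. 0xff as a byte, and clean_card_mask is clean_card - 31
  // = -32, i.e. 0xe0, so the mask is zero exactly in the low five bits
  // where the other card values live.  Testing
  //
  //   (val & (clean_card_mask | claimed_card)) == claimed_card
  //
  // is therefore true for a claimed card (e.g. 0x02, or 0x03 == precleaned
  // and claimed) but false for a clean card (0xff), whose mask bits are all
  // set.  This is how is_card_claimed() and is_card_deferred() further
  // below decode the shared card byte.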
  // The declaration order of these const fields is important; see the
  // constructor before changing.
  const MemRegion _whole_heap;       // the region covered by the card table
  const size_t    _guard_index;      // index of very last element in the card
                                     // table; it is set to a guard value
                                     // (last_card) and should never be modified
  const size_t    _last_valid_index; // index of the last valid element
  const size_t    _page_size;        // page size used when mapping _byte_map
  const size_t    _byte_map_size;    // in bytes
  jbyte*          _byte_map;         // the card marking array

  int _cur_covered_regions;
  // The covered regions should be in address order.
  MemRegion* _covered;
  // The committed regions correspond one-to-one to the covered regions.
  // They represent the card-table memory that has been committed to service
  // the corresponding covered region.  It may be that a committed region for
  // one covered region corresponds to a larger region because of page-size
  // roundings.  Thus, a committed region for one covered region may
  // actually extend onto the card-table space for the next covered region.
  MemRegion* _committed;

  // The last card is a guard card, and we commit the page for it so
  // we can use the card for verification purposes.  We make sure we never
  // uncommit the MemRegion for that page.
  MemRegion _guard_region;

protected:
  // Initialization utilities; covered_words is the size of the covered
  // region in heap words.
  inline size_t cards_required(size_t covered_words);
  inline size_t compute_byte_map_size();

  // Finds and returns the index of the region, if any, to which the given
  // region would be contiguous.  If none exists, assigns a new region and
  // returns its index.  Requires that no more than the maximum number of
  // covered regions defined in the constructor are ever in use.
  int find_covering_region_by_base(HeapWord* base);

  // Same as above, but finds the region containing the given address
  // instead of starting at a given base address.
  int find_covering_region_containing(HeapWord* addr);

  // Resize one of the regions covered by the remembered set.
  void resize_covered_region(MemRegion new_region);

  // Returns the leftmost end of a committed region corresponding to a
  // covered region before covered region "ind", or else "NULL" if "ind" is
  // the first covered region.
  HeapWord* largest_prev_committed_end(int ind) const;

  // Returns the part of the region mr that doesn't intersect with
  // any committed region other than self.  Used to prevent uncommitting
  // regions that are also committed by other regions.  Also protects
  // against uncommitting the guard region.
  MemRegion committed_unique_to_self(int self, MemRegion mr) const;

  // Mapping from address to card marking array entry
  jbyte* byte_for(const void* p) const {
    assert(_whole_heap.contains(p),
           "out of bounds access to card marking array");
    jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
    assert(result >= _byte_map && result < _byte_map + _byte_map_size,
           "out of bounds accessor for card marking array");
    return result;
  }
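  // A worked sketch of the biasing used by byte_for() above (illustrative
  // only; the real setup lives in the constructor, and "low_bound" is a
  // stand-in for the start of the region covered by the card table):
  //
  //   byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  //
  // With that bias, for any address p in the covered region:
  //
  //   &byte_map_base[uintptr_t(p) >> card_shift]
  //     == _byte_map + ((uintptr_t(p) >> card_shift)
  //                     - (uintptr_t(low_bound) >> card_shift))
  //
  // i.e. the card entry for p, with no per-lookup subtraction of the
  // heap's low boundary.  byte_map_base itself is declared further below.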
  // The card table byte one after the card marking array
  // entry for argument address.  Typically used for higher bounds
  // for loops iterating through the card table.
  jbyte* byte_after(const void* p) const {
    return byte_for(p) + 1;
  }

  // Iterate over the portion of the card table which covers the given
  // region mr in the given space and apply cl to any dirty sub-regions
  // of mr.  cl and dcto_cl must either be the same closure or cl must
  // wrap dcto_cl.  Both are required -- neither may be NULL.  Also, dcto_cl
  // may be modified.  Note that this function will operate in a parallel
  // mode if worker threads are available.
  void non_clean_card_iterate(Space* sp, MemRegion mr,
                              DirtyCardToOopClosure* dcto_cl,
                              MemRegionClosure* cl,
                              bool clear);

  // Utility function used to implement the other versions below.
  void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl,
                                   bool clear);

  void par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
                                       DirtyCardToOopClosure* dcto_cl,
                                       MemRegionClosure* cl,
                                       bool clear,
                                       int n_threads);

  // Dirty the bytes corresponding to "mr" (not all of which must be
  // covered).
  void dirty_MemRegion(MemRegion mr);

  // Clear (to clean_card) the bytes entirely contained within "mr" (not
  // all of which must be covered).
  void clear_MemRegion(MemRegion mr);

  // *** Support for parallel card scanning.

  enum SomeConstantsForParallelism {
    StridesPerThread    = 2,
    CardsPerStrideChunk = 256
  };

  // This is an array, one element per covered region of the card table.
  // Each entry is itself an array, with one element per chunk in the
  // covered region.  Each entry of these arrays is the lowest non-clean
  // card of the corresponding chunk containing part of an object from the
  // previous chunk, or else NULL.
  typedef jbyte*   CardPtr;
  typedef CardPtr* CardArr;
  CardArr* _lowest_non_clean;
  size_t*  _lowest_non_clean_chunk_size;
  uintptr_t* _lowest_non_clean_base_chunk_index;
  int* _last_LNC_resizing_collection;

  // Initializes "lowest_non_clean" to point to the array for the region
  // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
  // index corresponding to the first element of that array.
  // Ensures that these arrays are of sufficient size, allocating if necessary.
  // May be called by several threads concurrently.
  void get_LNC_array_for_space(Space* sp,
                               jbyte**& lowest_non_clean,
                               uintptr_t& lowest_non_clean_base_chunk_index,
                               size_t& lowest_non_clean_chunk_size);

  // Returns the number of chunks necessary to cover "mr".
  size_t chunks_to_cover(MemRegion mr) {
    return (size_t)(addr_to_chunk_index(mr.last()) -
                    addr_to_chunk_index(mr.start()) + 1);
  }

  // Returns the index of the chunk in a stride which
  // covers the given address.
  uintptr_t addr_to_chunk_index(const void* addr) {
    uintptr_t card = (uintptr_t) byte_for(addr);
    return card / CardsPerStrideChunk;
  }
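  // Worked example of the chunk arithmetic above (illustrative only):
  // with card_shift == 9, one card byte covers 512 heap bytes, so a
  // stride chunk of CardsPerStrideChunk == 256 cards covers
  // 256 * 512 = 128K of heap.  For a hypothetical chunk-aligned 1M
  // region "mr":
  //
  //   size_t n = chunks_to_cover(mr);   // 1M / 128K == 8 chunks
  //
  // The parallel iteration then deals these chunks out round-robin into
  // n_strides interleaved strides (StridesPerThread strides per worker),
  // each of which process_stride() below walks one chunk at a time.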
  // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
  // to the cards in the stride (of n_strides) within the given space.
  void process_stride(Space* sp,
                      MemRegion used,
                      jint stride, int n_strides,
                      DirtyCardToOopClosure* dcto_cl,
                      MemRegionClosure* cl,
                      bool clear,
                      jbyte** lowest_non_clean,
                      uintptr_t lowest_non_clean_base_chunk_index,
                      size_t lowest_non_clean_chunk_size);

  // Makes sure that chunk boundaries are handled appropriately, by
  // adjusting the min_done of dcto_cl, and by using a special card-table
  // value to indicate how min_done should be set.
  void process_chunk_boundaries(Space* sp,
                                DirtyCardToOopClosure* dcto_cl,
                                MemRegion chunk_mr,
                                MemRegion used,
                                jbyte** lowest_non_clean,
                                uintptr_t lowest_non_clean_base_chunk_index,
                                size_t lowest_non_clean_chunk_size);

public:
  // Constants
  enum SomePublicConstants {
    card_shift         = 9,
    card_size          = 1 << card_shift,
    card_size_in_words = card_size / sizeof(HeapWord)
  };

  static int clean_card_val()      { return clean_card; }
  static int clean_card_mask_val() { return clean_card_mask; }
  static int dirty_card_val()      { return dirty_card; }
  static int claimed_card_val()    { return claimed_card; }
  static int precleaned_card_val() { return precleaned_card; }
  static int deferred_card_val()   { return deferred_card; }

  // For RTTI simulation.
  bool is_a(BarrierSet::Name bsn) {
    return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn);
  }

  CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);

  // *** Barrier set functions.

  bool has_write_ref_pre_barrier() { return false; }

  inline bool write_ref_needs_barrier(void* field, oop new_val) {
    // Note that this assumes the perm gen is the highest generation
    // in the address space
    return new_val != NULL && !new_val->is_perm();
  }

  // Record a reference update.  Note that these versions are precise!
  // The scanning code has to handle the fact that the write barrier may be
  // either precise or imprecise.  We make non-virtual inline variants of
  // these functions here for performance.
protected:
  void write_ref_field_work(oop obj, size_t offset, oop newVal);
  virtual void write_ref_field_work(void* field, oop newVal);
public:

  bool has_write_ref_array_opt() { return true; }
  bool has_write_region_opt()    { return true; }

  inline void inline_write_region(MemRegion mr) {
    dirty_MemRegion(mr);
  }
protected:
  void write_region_work(MemRegion mr) {
    inline_write_region(mr);
  }
public:

  inline void inline_write_ref_array(MemRegion mr) {
    dirty_MemRegion(mr);
  }
protected:
  void write_ref_array_work(MemRegion mr) {
    inline_write_ref_array(mr);
  }
public:

  bool is_aligned(HeapWord* addr) {
    return is_card_aligned(addr);
  }

  // *** Card-table-barrier-specific things.

  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {}

  template <class T> inline void inline_write_ref_field(T* field, oop newVal) {
    jbyte* byte = byte_for((void*)field);
    *byte = dirty_card;
  }
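  // A sketch of what inline_write_ref_field() boils down to (illustrative
  // only; "card_table" stands for the biased byte_map_base declared below,
  // and the code the interpreter and JIT compilers actually emit varies by
  // platform):
  //
  //   obj->field = new_val;                                 // the reference store
  //   card_table[uintptr_t(&obj->field) >> card_shift] = dirty_card;
  //
  // i.e. one shift and one byte store per reference write, which is why a
  // non-virtual inline variant is provided for performance.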
  // These are used by G1, when it uses the card table as a temporary data
  // structure for card claiming.
  bool is_card_dirty(size_t card_index) {
    return _byte_map[card_index] == dirty_card_val();
  }

  void mark_card_dirty(size_t card_index) {
    _byte_map[card_index] = dirty_card_val();
  }

  bool is_card_claimed(size_t card_index) {
    jbyte val = _byte_map[card_index];
    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
  }

  void set_card_claimed(size_t card_index) {
    jbyte val = _byte_map[card_index];
    if (val == clean_card_val()) {
      val = (jbyte)claimed_card_val();
    } else {
      val |= (jbyte)claimed_card_val();
    }
    _byte_map[card_index] = val;
  }

  bool claim_card(size_t card_index);

  bool is_card_clean(size_t card_index) {
    return _byte_map[card_index] == clean_card_val();
  }

  bool is_card_deferred(size_t card_index) {
    jbyte val = _byte_map[card_index];
    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
  }

  bool mark_card_deferred(size_t card_index);

  // Card marking array base (adjusted for heap low boundary).
  // This would be the 0th element of _byte_map, if the heap started at 0x0.
  // But since the heap starts at some higher address, this points to somewhere
  // before the beginning of the actual _byte_map.
  jbyte* byte_map_base;

  // Return true if "p" is at the start of a card.
  bool is_card_aligned(HeapWord* p) {
    jbyte* pcard = byte_for(p);
    return (addr_for(pcard) == p);
  }

  // The kinds of precision a CardTableModRefBS may offer.
  enum PrecisionStyle {
    Precise,
    ObjHeadPreciseArray
  };

  // Tells what style of precision this card table offers.
  PrecisionStyle precision() {
    return ObjHeadPreciseArray; // Only one supported for now.
  }

  // ModRefBS functions.
  virtual void invalidate(MemRegion mr, bool whole_heap = false);
  void clear(MemRegion mr);
  void dirty(MemRegion mr);
  void mod_oop_in_space_iterate(Space* sp, OopClosure* cl,
                                bool clear = false,
                                bool before_save_marks = false);

  // *** Card-table-RemSet-specific things.

  // Invoke "cl.do_MemRegion" on a set of MemRegions that collectively
  // includes all the modified cards (expressing each card as a
  // MemRegion).  Thus, several modified cards may be lumped into one
  // region.  The regions are non-overlapping, and are visited in
  // *decreasing* address order.  (This order aids with imprecise card
  // marking, where a dirty card may cause scanning, and summarization
  // marking, of objects that extend onto subsequent cards.)
  // If "clear" is true, the card is (conceptually) marked unmodified before
  // applying the closure.
  void mod_card_iterate(MemRegionClosure* cl, bool clear = false) {
    non_clean_card_iterate_work(_whole_heap, cl, clear);
  }

  // Like the "mod_card_iterate" above, except only invokes the closure
  // for cards within the MemRegion "mr" (which is required to be
  // card-aligned and sized).
  void mod_card_iterate(MemRegion mr, MemRegionClosure* cl,
                        bool clear = false) {
    non_clean_card_iterate_work(mr, cl, clear);
  }

  static uintx ct_max_alignment_constraint();

  // Apply closure "cl" to the dirty cards containing some part of
  // MemRegion "mr".
  void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);
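  // A sketch of how a client might drive dirty_card_iterate() (illustrative
  // only; "CountDirtyClosure" is a hypothetical closure, not part of the VM):
  //
  //   class CountDirtyClosure: public MemRegionClosure {
  //   public:
  //     size_t _count;
  //     CountDirtyClosure() : _count(0) {}
  //     void do_MemRegion(MemRegion mr) {
  //       // Each region passed in covers a run of one or more dirty cards.
  //       _count += mr.word_size() / CardTableModRefBS::card_size_in_words;
  //     }
  //   };
  //
  //   CountDirtyClosure cl;
  //   ct->dirty_card_iterate(some_region, &cl);  // cl._count == # dirty cards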
  // Return the MemRegion corresponding to the first maximal run
  // of dirty cards lying completely within MemRegion mr.
  // If reset is "true", then sets those card table entries to the given
  // value.
  MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
                                         int reset_val);

  // Set all the dirty cards in the given region to the precleaned state.
  void preclean_dirty_cards(MemRegion mr);

  // Provide read-only access to the card table array.
  const jbyte* byte_for_const(const void* p) const {
    return byte_for(p);
  }
  const jbyte* byte_after_const(const void* p) const {
    return byte_after(p);
  }

  // Mapping from card marking array entry to address of first word
  HeapWord* addr_for(const jbyte* p) const {
    assert(p >= _byte_map && p < _byte_map + _byte_map_size,
           "out of bounds access to card marking array");
    size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
    HeapWord* result = (HeapWord*) (delta << card_shift);
    assert(_whole_heap.contains(result),
           "out of bounds accessor from card marking array");
    return result;
  }

  // Mapping from address to card marking array index.
  size_t index_for(void* p) {
    assert(_whole_heap.contains(p),
           "out of bounds access to card marking array");
    return byte_for(p) - _byte_map;
  }

  const jbyte* byte_for_index(const size_t card_index) const {
    return _byte_map + card_index;
  }

  void verify();
  void verify_guard();

  void verify_clean_region(MemRegion mr) PRODUCT_RETURN;
  void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;

  static size_t par_chunk_heapword_alignment() {
    return CardsPerStrideChunk * card_size_in_words;
  }

};

class CardTableRS;

// A specialization for the CardTableRS gen rem set.
class CardTableModRefBSForCTRS: public CardTableModRefBS {
  CardTableRS* _rs;
protected:
  bool card_will_be_scanned(jbyte cv);
  bool card_may_have_been_dirty(jbyte cv);
public:
  CardTableModRefBSForCTRS(MemRegion whole_heap,
                           int max_covered_regions) :
    CardTableModRefBS(whole_heap, max_covered_regions) {}

  void set_CTRS(CardTableRS* rs) { _rs = rs; }
};
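// A sketch of how the CTRS specialization above gets wired up (illustrative
// only; the actual construction happens during heap initialization, and
// "heap_region", "max_covered_regions", and "rs" are placeholders here):
//
//   CardTableModRefBSForCTRS* bs =
//     new CardTableModRefBSForCTRS(heap_region, max_covered_regions);
//   CardTableRS* rs = ...;   // the generational remembered set
//   bs->set_CTRS(rs);        // lets the overridden card_will_be_scanned()
//                            // and card_may_have_been_dirty() consult it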