53 dirty_card = 0,
54 precleaned_card = 1,
55 claimed_card = 2,
56 deferred_card = 4,
57 last_card = 8,
58 CT_MR_BS_last_reserved = 16
59 };
60
61 // a word's worth (row) of clean card values
// NOTE(review): assumes the clean-card value is -1 replicated in every
// byte of the word — confirm against clean_card_val() (declared above
// this view).
62 static const intptr_t clean_card_row = (intptr_t)(-1);
63
64 // The declaration order of these const fields is important; see the
65 // constructor before changing.
66 const MemRegion _whole_heap; // the region covered by the card table
67 size_t _guard_index; // index of very last element in the card
68 // table; it is set to a guard value
69 // (last_card) and should never be modified
70 size_t _last_valid_index; // index of the last valid element
71 const size_t _page_size; // page size used when mapping _byte_map
72 size_t _byte_map_size; // in bytes
73 jbyte* _byte_map; // the card marking array
74
75 int _cur_covered_regions;
76 // The covered regions should be in address order.
77 MemRegion* _covered;
78 // The committed regions correspond one-to-one to the covered regions.
79 // They represent the card-table memory that has been committed to service
80 // the corresponding covered region. It may be that committed region for
81 // one covered region corresponds to a larger region because of page-size
82 // roundings. Thus, a committed region for one covered region may
83 // actually extend onto the card-table space for the next covered region.
84 MemRegion* _committed;
85
86 // The last card is a guard card, and we commit the page for it so
87 // we can use the card for verification purposes. We make sure we never
88 // uncommit the MemRegion for that page.
89 MemRegion _guard_region;
90
91 protected:
// Size in bytes of the card table needed to cover _whole_heap,
// including the trailing guard card (see _guard_index above).
92 inline size_t compute_byte_map_size();
93
99
100 // Same as above, but finds the region containing the given address
101 // instead of starting at a given base address.
// NOTE(review): "above" refers to a declaration elided from this view
// (original lines 94-98) — presumably a find-covering-region-by-base
// helper; confirm against the full header.
102 int find_covering_region_containing(HeapWord* addr);
103
104 // Resize one of the regions covered by the remembered set.
105 virtual void resize_covered_region(MemRegion new_region);
106
107 // Returns the leftmost end of a committed region corresponding to a
108 // covered region before covered region "ind", or else "NULL" if "ind" is
109 // the first covered region.
110 HeapWord* largest_prev_committed_end(int ind) const;
111
112 // Returns the part of the region mr that doesn't intersect with
113 // any committed region other than self. Used to prevent uncommitting
114 // regions that are also committed by other regions. Also protects
115 // against uncommitting the guard region.
116 MemRegion committed_unique_to_self(int self, MemRegion mr) const;
117
117
118 // Mapping from address to card marking array entry
119 jbyte* byte_for(const void* p) const {
120 assert(_whole_heap.contains(p),
121 "Attempt to access p = " PTR_FORMAT " out of bounds of "
122 " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
123 p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
124 jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
125 assert(result >= _byte_map && result < _byte_map + _byte_map_size,
126 "out of bounds accessor for card marking array");
127 return result;
128 }
129
130 // The card table byte one after the card marking array
131 // entry for argument address. Typically used for higher bounds
132 // for loops iterating through the card table.
133 jbyte* byte_after(const void* p) const {
134 return byte_for(p) + 1;
135 }
136
137 protected:
138 // Dirty the bytes corresponding to "mr" (not all of which must be
139 // covered.)
140 void dirty_MemRegion(MemRegion mr);
141
142 // Clear (to clean_card) the bytes entirely contained within "mr" (not
143 // all of which must be covered.)
144 void clear_MemRegion(MemRegion mr);
145
146 public:
147 // Constants
// One card byte covers 2^card_shift = 512 bytes of heap.
148 enum SomePublicConstants {
149 card_shift = 9,
150 card_size = 1 << card_shift,
151 card_size_in_words = card_size / sizeof(HeapWord)
152 };
153
// Write-barrier helper; "release" selects a releasing store of newVal.
218 template <class T> inline void inline_write_ref_field(T* field, oop newVal, bool release);
219
220 // These are used by G1, when it uses the card table as a temporary data
221 // structure for card claiming.
222 bool is_card_dirty(size_t card_index) {
223 return _byte_map[card_index] == dirty_card_val();
224 }
225
226 void mark_card_dirty(size_t card_index) {
227 _byte_map[card_index] = dirty_card_val();
228 }
229
230 bool is_card_clean(size_t card_index) {
231 return _byte_map[card_index] == clean_card_val();
232 }
233
234 // Card marking array base (adjusted for heap low boundary)
235 // This would be the 0th element of _byte_map, if the heap started at 0x0.
236 // But since the heap starts at some higher address, this points to somewhere
237 // before the beginning of the actual _byte_map.
// Only byte_for()'s biased indexing may dereference through this; it is
// not a valid pointer by itself.
238 jbyte* byte_map_base;
239
240 // Return true if "p" is at the start of a card.
241 bool is_card_aligned(HeapWord* p) {
242 jbyte* pcard = byte_for(p);
243 return (addr_for(pcard) == p);
244 }
245
246 HeapWord* align_to_card_boundary(HeapWord* p) {
247 jbyte* pcard = byte_for(p + card_size_in_words - 1);
248 return addr_for(pcard);
249 }
250
251 // The kinds of precision a CardTableModRefBS may offer.
// Precise: a dirty card pinpoints the modified word; ObjHeadPreciseArray:
// object-head precision, array stores recorded precisely.
252 enum PrecisionStyle {
253 Precise,
254 ObjHeadPreciseArray
255 };
256
257 // Tells what style of precision this card table offers.
258 PrecisionStyle precision() {
259 return ObjHeadPreciseArray; // Only one supported for now.
260 }
261
262 // ModRefBS functions.
// Invalidate (dirty) all cards for mr; whole_heap hints a full-heap op.
263 virtual void invalidate(MemRegion mr, bool whole_heap = false);
264 void clear(MemRegion mr);
265 void dirty(MemRegion mr);
266
267 // *** Card-table-RemSet-specific things.
268
// Largest alignment the card table can require of the heap.
269 static uintx ct_max_alignment_constraint();
270
271 // Apply closure "cl" to the dirty cards containing some part of
272 // MemRegion "mr".
273 void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);
274
275 // Return the MemRegion corresponding to the first maximal run
276 // of dirty cards lying completely within MemRegion mr.
277 // If reset is "true", then sets those card table entries to the given
278 // value.
279 MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
280 int reset_val);
281
281
282 // Provide read-only access to the card table array.
283 const jbyte* byte_for_const(const void* p) const {
284 return byte_for(p);
285 }
286 const jbyte* byte_after_const(const void* p) const {
287 return byte_after(p);
288 }
289
290 // Mapping from card marking array entry to address of first word
291 HeapWord* addr_for(const jbyte* p) const {
292 assert(p >= _byte_map && p < _byte_map + _byte_map_size,
293 "out of bounds access to card marking array");
294 size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
295 HeapWord* result = (HeapWord*) (delta << card_shift);
296 assert(_whole_heap.contains(result),
297 "Returning result = " PTR_FORMAT " out of bounds of "
298 " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
299 p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
300 return result;
301 }
302
303 // Mapping from address to card marking array index.
304 size_t index_for(void* p) {
305 assert(_whole_heap.contains(p),
306 "Attempt to access p = " PTR_FORMAT " out of bounds of "
307 " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
308 p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
309 return byte_for(p) - _byte_map;
310 }
311
312 const jbyte* byte_for_index(const size_t card_index) const {
313 return _byte_map + card_index;
314 }
315
316 // Print a description of the memory for the barrier set
317 virtual void print_on(outputStream* st) const;
318
// Debug-only consistency checks of the table and the guard card.
319 void verify();
320 void verify_guard();
321
322 // val_equals -> it will check that all cards covered by mr equal val
323 // !val_equals -> it will check that all cards covered by mr do not equal val
// PRODUCT_RETURN: these compile to empty bodies in product builds.
324 void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
325 void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
326 void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
327 };
328
// Compile-time mapping from the barrier-set type to its enum name tag.
329 template<>
330 struct BarrierSet::GetName<CardTableModRefBS> {
331 static const BarrierSet::Name value = BarrierSet::CardTableModRef;
332 };
|
53 dirty_card = 0,
54 precleaned_card = 1,
55 claimed_card = 2,
56 deferred_card = 4,
57 last_card = 8,
58 CT_MR_BS_last_reserved = 16
59 };
60
61 // a word's worth (row) of clean card values
// NOTE(review): assumes the clean-card value is -1 replicated in every
// byte of the word — confirm against clean_card_val().
62 static const intptr_t clean_card_row = (intptr_t)(-1);
63
64 // The declaration order of these const fields is important; see the
65 // constructor before changing.
66 const MemRegion _whole_heap; // the region covered by the card table
67 size_t _guard_index; // index of very last element in the card
68 // table; it is set to a guard value
69 // (last_card) and should never be modified
70 size_t _last_valid_index; // index of the last valid element
71 const size_t _page_size; // page size used when mapping _byte_map
72 size_t _byte_map_size; // in bytes
// This pane qualifies the card bytes volatile: every read/write through
// _byte_map is a real memory access the compiler may not elide or merge.
73 volatile jbyte* _byte_map; // the card marking array
74
75 int _cur_covered_regions;
76 // The covered regions should be in address order.
77 MemRegion* _covered;
78 // The committed regions correspond one-to-one to the covered regions.
79 // They represent the card-table memory that has been committed to service
80 // the corresponding covered region. It may be that committed region for
81 // one covered region corresponds to a larger region because of page-size
82 // roundings. Thus, a committed region for one covered region may
83 // actually extend onto the card-table space for the next covered region.
84 MemRegion* _committed;
85
86 // The last card is a guard card, and we commit the page for it so
87 // we can use the card for verification purposes. We make sure we never
88 // uncommit the MemRegion for that page.
89 MemRegion _guard_region;
90
91 protected:
// Size in bytes of the card table needed to cover _whole_heap,
// including the trailing guard card (see _guard_index above).
92 inline size_t compute_byte_map_size();
93
99
100 // Same as above, but finds the region containing the given address
101 // instead of starting at a given base address.
// NOTE(review): "above" refers to a declaration elided from this view
// (original lines 94-98) — confirm against the full header.
102 int find_covering_region_containing(HeapWord* addr);
103
104 // Resize one of the regions covered by the remembered set.
105 virtual void resize_covered_region(MemRegion new_region);
106
107 // Returns the leftmost end of a committed region corresponding to a
108 // covered region before covered region "ind", or else "NULL" if "ind" is
109 // the first covered region.
110 HeapWord* largest_prev_committed_end(int ind) const;
111
112 // Returns the part of the region mr that doesn't intersect with
113 // any committed region other than self. Used to prevent uncommitting
114 // regions that are also committed by other regions. Also protects
115 // against uncommitting the guard region.
116 MemRegion committed_unique_to_self(int self, MemRegion mr) const;
117
117
118 // Mapping from address to card marking array entry
// Returns a volatile-qualified entry pointer in this pane: callers'
// dereferences become genuine memory accesses (not cached/elided).
119 volatile jbyte* byte_for(const void* p) const {
120 assert(_whole_heap.contains(p),
121 "Attempt to access p = " PTR_FORMAT " out of bounds of "
122 " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
123 p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
// byte_map_base is biased so (p >> card_shift) indexes into _byte_map.
124 volatile jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
125 assert(result >= _byte_map && result < _byte_map + _byte_map_size,
126 "out of bounds accessor for card marking array");
127 return result;
128 }
129
130 // The card table byte one after the card marking array
131 // entry for argument address. Typically used for higher bounds
132 // for loops iterating through the card table.
133 volatile jbyte* byte_after(const void* p) const {
134 return byte_for(p) + 1;
135 }
136
137 protected:
138 // Dirty the bytes corresponding to "mr" (not all of which must be
139 // covered.)
140 void dirty_MemRegion(MemRegion mr);
141
142 // Clear (to clean_card) the bytes entirely contained within "mr" (not
143 // all of which must be covered.)
144 void clear_MemRegion(MemRegion mr);
145
146 public:
147 // Constants
// One card byte covers 2^card_shift = 512 bytes of heap.
148 enum SomePublicConstants {
149 card_shift = 9,
150 card_size = 1 << card_shift,
151 card_size_in_words = card_size / sizeof(HeapWord)
152 };
153
153
// Write-barrier helper; "release" selects a releasing store of newVal.
218 template <class T> inline void inline_write_ref_field(T* field, oop newVal, bool release);
219
220 // These are used by G1, when it uses the card table as a temporary data
221 // structure for card claiming.
// _byte_map is volatile here, so each helper performs exactly one real
// memory read/write of the card byte — do not reorder or merge these.
222 bool is_card_dirty(size_t card_index) {
223 return _byte_map[card_index] == dirty_card_val();
224 }
225
226 void mark_card_dirty(size_t card_index) {
227 _byte_map[card_index] = dirty_card_val();
228 }
229
230 bool is_card_clean(size_t card_index) {
231 return _byte_map[card_index] == clean_card_val();
232 }
233
234 // Card marking array base (adjusted for heap low boundary)
235 // This would be the 0th element of _byte_map, if the heap started at 0x0.
236 // But since the heap starts at some higher address, this points to somewhere
237 // before the beginning of the actual _byte_map.
// Not a valid pointer by itself; only byte_for()'s biased indexing may
// dereference through it.
238 volatile jbyte* byte_map_base;
239
240 // Return true if "p" is at the start of a card.
241 bool is_card_aligned(HeapWord* p) {
242 volatile jbyte* pcard = byte_for(p);
243 return (addr_for(pcard) == p);
244 }
245
// Rounds p up to the next card boundary (identity if already aligned):
// the card covering p's last-possible word maps back to that boundary.
246 HeapWord* align_to_card_boundary(HeapWord* p) {
247 volatile jbyte* pcard = byte_for(p + card_size_in_words - 1);
248 return addr_for(pcard);
249 }
250
251 // The kinds of precision a CardTableModRefBS may offer.
// Precise: a dirty card pinpoints the modified word; ObjHeadPreciseArray:
// object-head precision, array stores recorded precisely.
252 enum PrecisionStyle {
253 Precise,
254 ObjHeadPreciseArray
255 };
256
257 // Tells what style of precision this card table offers.
258 PrecisionStyle precision() {
259 return ObjHeadPreciseArray; // Only one supported for now.
260 }
261
262 // ModRefBS functions.
// Invalidate (dirty) all cards for mr; whole_heap hints a full-heap op.
263 virtual void invalidate(MemRegion mr, bool whole_heap = false);
264 void clear(MemRegion mr);
265 void dirty(MemRegion mr);
266
267 // *** Card-table-RemSet-specific things.
268
// Largest alignment the card table can require of the heap.
269 static uintx ct_max_alignment_constraint();
270
271 // Apply closure "cl" to the dirty cards containing some part of
272 // MemRegion "mr".
273 void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);
274
275 // Return the MemRegion corresponding to the first maximal run
276 // of dirty cards lying completely within MemRegion mr.
277 // If reset is "true", then sets those card table entries to the given
278 // value.
279 MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
280 int reset_val);
281
281
282 // Provide read-only access to the card table array.
// const + volatile: callers may read (as a real memory access) but not
// write through these pointers.
283 const volatile jbyte* byte_for_const(const void* p) const {
284 return byte_for(p);
285 }
286 const volatile jbyte* byte_after_const(const void* p) const {
287 return byte_after(p);
288 }
289
290 // Mapping from card marking array entry to address of first word
// Inverse of byte_for() for card-aligned addresses. The casts strip the
// volatile qualifier for pointer_delta, which takes plain const void*;
// no card byte is read here, only pointer arithmetic is done.
291 HeapWord* addr_for(const volatile jbyte* p) const {
292 assert(p >= _byte_map && p < _byte_map + _byte_map_size,
293 "out of bounds access to card marking array");
294 size_t delta = pointer_delta((const void*)p, (const void*)byte_map_base, sizeof(jbyte));
295 HeapWord* result = (HeapWord*) (delta << card_shift);
296 assert(_whole_heap.contains(result),
297 "Returning result = " PTR_FORMAT " out of bounds of "
298 " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
299 p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
300 return result;
301 }
302
303 // Mapping from address to card marking array index.
304 size_t index_for(void* p) {
305 assert(_whole_heap.contains(p),
306 "Attempt to access p = " PTR_FORMAT " out of bounds of "
307 " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
308 p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
// Pointer subtraction only; no card byte is read.
309 return byte_for(p) - _byte_map;
310 }
311
// Card table entry for a raw card index (no bounds check).
312 const volatile jbyte* byte_for_index(const size_t card_index) const {
313 return _byte_map + card_index;
314 }
315
316 // Print a description of the memory for the barrier set
317 virtual void print_on(outputStream* st) const;
318
// Debug-only consistency checks of the table and the guard card.
319 void verify();
320 void verify_guard();
321
322 // val_equals -> it will check that all cards covered by mr equal val
323 // !val_equals -> it will check that all cards covered by mr do not equal val
// PRODUCT_RETURN: these compile to empty bodies in product builds.
324 void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
325 void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
326 void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
327 };
328
// Compile-time mapping from the barrier-set type to its enum name tag.
329 template<>
330 struct BarrierSet::GetName<CardTableModRefBS> {
331 static const BarrierSet::Name value = BarrierSet::CardTableModRef;
332 };
|