 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP

#include "gc/shared/modRefBarrierSet.hpp"
#include "oops/oop.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)

// As it currently stands, this barrier is *imprecise*: when a ref field in
// an object "o" is modified, the card table entry for the card containing
// the head of "o" is dirtied, not necessarily the card containing the
// modified field itself. For object arrays, however, the barrier *is*
// precise; only the card containing the modified element is dirtied.
// Closures used to scan dirty cards should take these
// considerations into account.
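
// Illustrative example (addresses are examples only, assuming the 512-byte
// cards defined by SomePublicConstants below): if an object "o" spans
// [0x1000, 0x1500) and the field at 0x1408 is updated, the imprecise barrier
// dirties the card covering 0x1000 (card [0x1000, 0x1200)), not the card
// covering the field itself (card [0x1400, 0x1600)).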

class CardTableModRefBS: public ModRefBarrierSet {
  // Some classes get to look at some private stuff.
  friend class VMStructs;
 protected:

  enum CardValues {
    clean_card                  = -1,
    // The mask contains zeros in places for all other values.
    clean_card_mask             = clean_card - 31,

    dirty_card                  =  0,
    precleaned_card             =  1,
    claimed_card                =  2,
    deferred_card               =  4,
    last_card                   =  8,
    CT_MR_BS_last_reserved      = 16
  };
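
  // Illustrative bit patterns (jbyte values shown as unsigned bytes):
  //   clean_card      == -1  -> 0xff
  //   clean_card_mask == -32 -> 0xe0, i.e. the low five bits are zero.
  // All of the other values above fit in those low five bits, which is what
  // the comment on clean_card_mask refers to.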

  // a word's worth (row) of clean card values
  static const intptr_t clean_card_row = (intptr_t)(-1);

  // The declaration order of these const fields is important; see the
  // constructor before changing.
  const MemRegion _whole_heap;       // the region covered by the card table
  size_t          _guard_index;      // index of very last element in the card
                                     // table; it is set to a guard value
                                     // (last_card) and should never be modified
  size_t          _last_valid_index; // index of the last valid element
  const size_t    _page_size;        // page size used when mapping _byte_map
  size_t          _byte_map_size;    // in bytes
  jbyte*          _byte_map;         // the card marking array

  int _cur_covered_regions;
  // The covered regions should be in address order.
  MemRegion* _covered;
  // The committed regions correspond one-to-one to the covered regions.
  // They represent the card-table memory that has been committed to service
  // the corresponding covered region. It may be that the committed region for
  // one covered region corresponds to a larger region because of page-size
  // roundings. Thus, a committed region for one covered region may
  // actually extend onto the card-table space for the next covered region.
  MemRegion* _committed;

  // The last card is a guard card, and we commit the page for it so
  // we can use the card for verification purposes. We make sure we never
  // uncommit the MemRegion for that page.
  MemRegion _guard_region;

 protected:
  inline size_t compute_byte_map_size();

  // Finds and returns the index of the region, if any, to which the given
  // region would be contiguous. If none exists, assigns a new region and
  // returns its index. Requires that no more than the maximum number of
  // covered regions defined in the constructor are ever in use.
  int find_covering_region_by_base(HeapWord* base);

  // Same as above, but finds the region containing the given address
  // instead of starting at a given base address.
  int find_covering_region_containing(HeapWord* addr);

  // Resize one of the regions covered by the remembered set.
  virtual void resize_covered_region(MemRegion new_region);

  // Returns the leftmost end of a committed region corresponding to a
  // covered region before covered region "ind", or else "NULL" if "ind" is
  // the first covered region.
  HeapWord* largest_prev_committed_end(int ind) const;

  // Returns the part of the region mr that doesn't intersect with
  // any committed region other than self. Used to prevent uncommitting
  // regions that are also committed by other regions. Also protects
  // against uncommitting the guard region.
  MemRegion committed_unique_to_self(int self, MemRegion mr) const;

  // Mapping from address to card marking array entry
  jbyte* byte_for(const void* p) const {
    assert(_whole_heap.contains(p),
           "Attempt to access p = " PTR_FORMAT " out of bounds of "
           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
           p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
    jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
    assert(result >= _byte_map && result < _byte_map + _byte_map_size,
           "out of bounds accessor for card marking array");
    return result;
  }

  // The card table byte one after the card marking array
  // entry for argument address. Typically used for higher bounds
  // for loops iterating through the card table.
  jbyte* byte_after(const void* p) const {
    return byte_for(p) + 1;
  }

 protected:
  // Dirty the bytes corresponding to "mr" (not all of which must be
  // covered.)
  void dirty_MemRegion(MemRegion mr);

  // Clear (to clean_card) the bytes entirely contained within "mr" (not
  // all of which must be covered.)
  void clear_MemRegion(MemRegion mr);

 public:
  // Constants
  enum SomePublicConstants {
    card_shift                  = 9,
    card_size                   = 1 << card_shift,
    card_size_in_words          = card_size / sizeof(HeapWord)
  };
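
  // Illustrative arithmetic (assuming an 8-byte HeapWord, as on a 64-bit VM):
  //   card_size          = 1 << 9  = 512 bytes of heap per card
  //   card_size_in_words = 512 / 8 = 64 HeapWords per card
  // so byte_for() above maps an address p to the card table entry
  // byte_map_base[uintptr_t(p) >> 9].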

  static int clean_card_val()      { return clean_card; }
  static int clean_card_mask_val() { return clean_card_mask; }
  static int dirty_card_val()      { return dirty_card; }
  static int claimed_card_val()    { return claimed_card; }
  static int precleaned_card_val() { return precleaned_card; }
  static int deferred_card_val()   { return deferred_card; }

  virtual void initialize();

  // *** Barrier set functions.

  bool has_write_ref_pre_barrier() { return false; }

  // Initialization utilities; covered_words is the size of the covered region
  // in, um, words.
  inline size_t cards_required(size_t covered_words) {
    // Add one for a guard card, used to detect errors.
    const size_t words = align_size_up(covered_words, card_size_in_words);
    return words / card_size_in_words + 1;
  }
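
  // Worked example (a sketch, assuming card_size_in_words == 64): a covered
  // region of 1000 words is rounded up to 1024 words, giving
  // 1024 / 64 + 1 = 17 card table entries, the "+ 1" being the guard card.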

 protected:

  CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
  ~CardTableModRefBS();

  // Record a reference update. Note that these versions are precise!
  // The scanning code has to handle the fact that the write barrier may be
  // either precise or imprecise. We make non-virtual inline variants of
  // these functions here for performance.

  void write_ref_field_work(oop obj, size_t offset, oop newVal);
  virtual void write_ref_field_work(void* field, oop newVal, bool release);
 public:

  bool has_write_ref_array_opt() { return true; }
  bool has_write_region_opt() { return true; }

  inline void inline_write_region(MemRegion mr) {
    dirty_MemRegion(mr);
  }
 protected:
  void write_region_work(MemRegion mr) {
    inline_write_region(mr);
  }
 public:

  inline void inline_write_ref_array(MemRegion mr) {
    dirty_MemRegion(mr);
  }
 protected:
  void write_ref_array_work(MemRegion mr) {
    inline_write_ref_array(mr);
  }
 public:

  bool is_aligned(HeapWord* addr) {
    return is_card_aligned(addr);
  }

  // *** Card-table-barrier-specific things.

  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {}

  template <class T> inline void inline_write_ref_field(T* field, oop newVal, bool release);
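
  // The definition lives in the corresponding .inline.hpp file; conceptually
  // it amounts to the following sketch (shown here for illustration only):
  //
  //   template <class T>
  //   inline void CardTableModRefBS::inline_write_ref_field(T* field, oop newVal, bool release) {
  //     volatile jbyte* byte = byte_for((void*)field);
  //     if (release) {
  //       OrderAccess::release_store(byte, jbyte(dirty_card));
  //     } else {
  //       *byte = dirty_card;
  //     }
  //   }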

  // These are used by G1, when it uses the card table as a temporary data
  // structure for card claiming.
  bool is_card_dirty(size_t card_index) {
    return _byte_map[card_index] == dirty_card_val();
  }

  void mark_card_dirty(size_t card_index) {
    _byte_map[card_index] = dirty_card_val();
  }

  bool is_card_clean(size_t card_index) {
    return _byte_map[card_index] == clean_card_val();
  }

  // Card marking array base (adjusted for heap low boundary)
  // This would be the 0th element of _byte_map, if the heap started at 0x0.
  // But since the heap starts at some higher address, this points to somewhere
  // before the beginning of the actual _byte_map.
  jbyte* byte_map_base;

  // Return true if "p" is at the start of a card.
  bool is_card_aligned(HeapWord* p) {
    jbyte* pcard = byte_for(p);
    return (addr_for(pcard) == p);
  }

  HeapWord* align_to_card_boundary(HeapWord* p) {
    jbyte* pcard = byte_for(p + card_size_in_words - 1);
    return addr_for(pcard);
  }

  // The kinds of precision a CardTableModRefBS may offer.
  enum PrecisionStyle {
    Precise,
    ObjHeadPreciseArray
  };

  // Tells what style of precision this card table offers.
  PrecisionStyle precision() {
    return ObjHeadPreciseArray; // Only one supported for now.
  }

  // ModRefBS functions.
  virtual void invalidate(MemRegion mr);
  void clear(MemRegion mr);
  void dirty(MemRegion mr);

  // *** Card-table-RemSet-specific things.

  static uintx ct_max_alignment_constraint();

  // Apply closure "cl" to the dirty cards containing some part of
  // MemRegion "mr".
  void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);

  // Return the MemRegion corresponding to the first maximal run
  // of dirty cards lying completely within MemRegion mr.
  // If reset is "true", then sets those card table entries to the given
  // value.
  MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
                                         int reset_val);

  // Provide read-only access to the card table array.
  const jbyte* byte_for_const(const void* p) const {
    return byte_for(p);
  }
  const jbyte* byte_after_const(const void* p) const {
    return byte_after(p);
  }

  // Mapping from card marking array entry to address of first word
  HeapWord* addr_for(const jbyte* p) const {
    assert(p >= _byte_map && p < _byte_map + _byte_map_size,
           "out of bounds access to card marking array");
    size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
    HeapWord* result = (HeapWord*) (delta << card_shift);
    assert(_whole_heap.contains(result),
           "Returning result = " PTR_FORMAT " out of bounds of "
           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
           p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
    return result;
  }
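
  // Illustrative round trip (addresses are examples only): with 512-byte
  // cards, if p == 0x700000208 then byte_for(p) is the entry at index
  // 0x700000208 >> 9 relative to byte_map_base, and addr_for(byte_for(p))
  // returns 0x700000200, the first word of the card containing p.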

  // Mapping from address to card marking array index.
  size_t index_for(void* p) {
    assert(_whole_heap.contains(p),
           "Attempt to access p = " PTR_FORMAT " out of bounds of "
           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
           p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
    return byte_for(p) - _byte_map;
  }

  const jbyte* byte_for_index(const size_t card_index) const {
    return _byte_map + card_index;
  }

  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const;

  void verify();
  void verify_guard();

  // val_equals -> it will check that all cards covered by mr equal val
  // !val_equals -> it will check that all cards covered by mr do not equal val
  void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
  void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
  void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
};

template<>
struct BarrierSet::GetName<CardTableModRefBS> {
  static const BarrierSet::Name value = BarrierSet::CardTableModRef;
};


#endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP

#include "gc/shared/modRefBarrierSet.hpp"

class CardTable;

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)

// As it currently stands, this barrier is *imprecise*: when a ref field in
// an object "o" is modified, the card table entry for the card containing
// the head of "o" is dirtied, not necessarily the card containing the
// modified field itself. For object arrays, however, the barrier *is*
// precise; only the card containing the modified element is dirtied.
// Closures used to scan dirty cards should take these
// considerations into account.

class CardTableModRefBS: public ModRefBarrierSet {
  // Some classes get to look at some private stuff.
  friend class VMStructs;
 protected:
  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
  // or INCLUDE_JVMCI is being used
  bool       _defer_initial_card_mark;
  bool       _can_elide_tlab_store_barriers;
  CardTable* _card_table;

  CardTableModRefBS(CardTable* card_table, const BarrierSet::FakeRtti& fake_rtti);

 public:
  CardTableModRefBS(CardTable* card_table);
  ~CardTableModRefBS();

  void initialize();

  CardTable* card_table() const { return _card_table; }

  // Record a reference update. Note that these versions are precise!
  // The scanning code has to handle the fact that the write barrier may be
  // either precise or imprecise. We make non-virtual inline variants of
  // these functions here for performance.

  template <DecoratorSet decorators>
  void write_ref_field_post(void* field, oop newVal);

  virtual void write_region(MemRegion mr);
  virtual void write_ref_array_region(MemRegion mr);

  // ModRefBS functions.
  virtual void invalidate(MemRegion mr);

  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const;

  // ReduceInitialCardMarks
  void new_deferred_store_barrier(JavaThread* thread, oop new_obj);

  // If the CollectedHeap was asked to defer a store barrier above,
  // this informs it to flush such a deferred store barrier to the
  // remembered set.
  void flush_deferred_store_barrier(JavaThread* thread);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint. If such permission
  // is granted for this heap type, the compiler promises to call
  // defer_store_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided. G1, like CMS, allows this, but should be
  // ready to provide a compensating write barrier as necessary
  // if that storage came out of a non-young region. The efficiency
  // of this implementation depends crucially on being able to
  // answer very efficiently in constant time whether a piece of
  // storage in the heap comes from a young region or not.
  // See ReduceInitialCardMarks.
  virtual bool can_elide_tlab_store_barriers() const { return true; }

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // we will be informed of a slow-path allocation by a call
  // to new_deferred_store_barrier() above. Such a call precedes the
  // initialization of the object itself, and no post-store-barriers will
  // be issued. Some heap types require that the barrier strictly follows
  // the initializing stores. (This is currently implemented by deferring the
  // barrier until the next slow-path allocation or gc-related safepoint.)
  // This interface answers whether a particular barrier type needs the card
  // mark to be thus strictly sequenced after the stores.
  virtual bool card_mark_must_follow_store() const;

  void on_slowpath_allocation(JavaThread* thread, oop new_obj) {
    if (_can_elide_tlab_store_barriers) {
      new_deferred_store_barrier(thread, new_obj);
    }
  }
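
  // Illustrative flow (a sketch of the intended sequence, not the exact call
  // sites): when a compiler elides the initializing card marks, a slow-path
  // allocation goes through
  //   bs->on_slowpath_allocation(thread, new_obj);  // record a deferred card mark
  // and the deferred mark is later written out to the card table by
  //   bs->flush_deferred_store_barrier(thread);     // dirty the covering cards
  // at the next slow-path allocation or GC-related safepoint.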
  void on_destroy_thread(JavaThread* thread);
  void make_parsable(JavaThread* thread);

 protected:
  virtual BarrierSetCodeGen* make_code_gen();
  virtual C1BarrierSetCodeGen* make_c1_code_gen();
  virtual C2BarrierSetCodeGen* make_c2_code_gen();

 public:
  template <DecoratorSet decorators>
  class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, CardTableModRefBS> {};
};

template<>
struct BSTypeToName<CardTableModRefBS> {
  static const BarrierSet::Name value = BarrierSet::CardTableModRef;
};

template<>
struct BSNameToType<BarrierSet::CardTableModRef> {
  typedef CardTableModRefBS type;
};

#endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP