20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_SPARSEPRT_HPP
26 #define SHARE_VM_GC_G1_SPARSEPRT_HPP
27
28 #include "gc/g1/g1CollectedHeap.hpp"
29 #include "gc/g1/heapRegion.hpp"
30 #include "gc/shared/cardTableBarrierSet.hpp"
31 #include "memory/allocation.hpp"
32 #include "runtime/mutex.hpp"
33 #include "utilities/align.hpp"
34 #include "utilities/globalDefinitions.hpp"
35
36 // Sparse remembered set for a heap region (the "owning" region). Maps
37 // indices of other regions to short sequences of cards in the other region
38 // that might contain pointers into the owner region.
39
40 // These tables only expand while they are accessed in parallel --
41 // deletions may be done in single-threaded code. This lets us allow
42 // unsynchronized reads/iterations, as long as expansions caused by
43 // insertions only enqueue old versions for deletions, but do not delete
44 // old versions synchronously.
45
46 class SparsePRTEntry: public CHeapObj<mtGC> {
47 private:
48 // The type of a card entry.
49 typedef uint16_t card_elem_t;
50
51 // We need to make sizeof(SparsePRTEntry) an even multiple of maximum member size,
52 // in order to force correct alignment that could otherwise cause SIGBUS errors
53 // when reading the member variables. This calculates the minimum number of card
54 // array elements required to get that alignment.
55 static const size_t card_array_alignment = sizeof(int) / sizeof(card_elem_t);
56
57 RegionIdx_t _region_ind;
58 int _next_index;
59 int _next_null;
60 // The actual cards stored in this array.
61 // WARNING: Don't put any data members beyond this line. Card array has, in fact, variable length.
62 // It should always be the last data member.
63 card_elem_t _cards[card_array_alignment];
64
65 // Copy the current entry's cards into "cards".
141 // Declares the entry "fi" to be free. (It must have already been
142 // deleted from any bucket lists.)
143 void free_entry(int fi);
144
145 public:
146 RSHashTable(size_t capacity);
147 ~RSHashTable();
148
149 static const int NullEntry = -1;
150
151 bool should_expand() const { return _occupied_entries == _num_entries; }
152
153 // Attempts to ensure that the given card_index in the given region is in
154 // the sparse table. If successful (because the card was already
155 // present, or because it was successfully added) returns "true".
156 // Otherwise, returns "false" to indicate that the addition would
157 // overflow the entry for the region. The caller must transfer these
158 // entries to a larger-capacity representation.
159 bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
160
161 bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);
162
163 bool delete_entry(RegionIdx_t region_id);
164
165 bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;
166
167 void add_entry(SparsePRTEntry* e);
168
169 SparsePRTEntry* get_entry(RegionIdx_t region_id) const;
170
171 void clear();
172
173 size_t capacity() const { return _capacity; }
174 size_t capacity_mask() const { return _capacity_mask; }
175 size_t occupied_entries() const { return _occupied_entries; }
176 size_t occupied_cards() const { return _occupied_cards; }
177 size_t mem_size() const;
178 // The number of SparsePRTEntry instances available.
179 size_t num_entries() const { return _num_entries; }
180
181 SparsePRTEntry* entry(int i) const {
182 assert(i >= 0 && (size_t)i < _num_entries, "precondition");
203
204 // Computes the proper card index for the card whose offset in the
205 // current region (as indicated by _bl_ind) is "ci".
206 // This is subject to errors when there is iteration concurrent with
207 // modification, but these errors should be benign.
208 size_t compute_card_ind(CardIdx_t ci);
209
210 public:
211 RSHashTableIter(RSHashTable* rsht) :
212 _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
213 _bl_ind(RSHashTable::NullEntry), // No current bucket list yet.
214 _card_ind((SparsePRTEntry::cards_num() - 1)), // Last card slot, so the first advance presumably moves on to a fresh entry.
215 _rsht(rsht) {} // Table to iterate over; not owned by the iterator.
216
217 bool has_next(size_t& card_index);
218 };
219
220 // Concurrent access to a SparsePRT must be serialized by some external mutex.
221
222 class SparsePRTIter;
223 class SparsePRTCleanupTask;
224
225 class SparsePRT {
226 friend class SparsePRTCleanupTask;
227
228 // Iterations are done on the _cur hash table, since they only need to
229 // see entries visible at the start of a collection pause.
230 // All other operations are done using the _next hash table.
231 RSHashTable* _cur;
232 RSHashTable* _next;
233
234 enum SomeAdditionalPrivateConstants {
235 InitialCapacity = 16
236 };
237
238 void expand(); // Install a larger _next; the superseded table is retained for in-flight readers rather than freed immediately.
239
240 bool _expanded; // True iff this table has been expanded (and so may be on the expanded list).
241
242 bool expanded() { return _expanded; }
243 void set_expanded(bool b) { _expanded = b; }
244
245 SparsePRT* _next_expanded; // Intrusive link chaining this PRT onto an expanded list.
246
247 SparsePRT* next_expanded() { return _next_expanded; }
248 void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }
249
250 bool should_be_on_expanded_list(); // Whether this table currently belongs on the expanded list.
251
252 static SparsePRT* volatile _head_expanded_list; // Head of the global list of expanded tables.
253
254 public:
255 SparsePRT();
256
257 ~SparsePRT();
258
259 size_t occupied() const { return _next->occupied_cards(); } // Card count from _next, the up-to-date table.
260 size_t mem_size() const;
261
262 // Attempts to ensure that the given card_index in the given region is in
263 // the sparse table. If successful (because the card was already
264 // present, or because it was successfully added) returns "true".
265 // Otherwise, returns "false" to indicate that the addition would
266 // overflow the entry for the region. The caller must transfer these
267 // entries to a larger-capacity representation.
268 bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
269
270 // Return the pointer to the entry associated with the given region.
271 SparsePRTEntry* get_entry(RegionIdx_t region_ind);
272
273 // If there is an entry for "region_ind", removes it and returns "true";
274 // otherwise returns "false."
275 bool delete_entry(RegionIdx_t region_ind);
276
277 // Clear the table, and reinitialize to initial capacity.
278 void clear();
279
280 // Ensure that "_cur" and "_next" point to the same table.
281 void cleanup();
282
283 // Clean up all tables on the expanded list. Called single threaded.
284 static void cleanup_all();
285 RSHashTable* cur() const { return _cur; } // Snapshot table used by iterators.
286
287 static void add_to_expanded_list(SparsePRT* sprt); // Push "sprt" onto the global expanded list.
288 static SparsePRT* get_from_expanded_list(); // Pop one table from the global expanded list, or NULL.
289
290 // The purpose of these three methods is to help the GC workers
291 // during the cleanup pause to recreate the expanded list, purging
292 // any tables from it that belong to regions that are freed during
293 // cleanup (if we don't purge those tables, there is a race that
294 // causes various crashes; see CR 7014261).
295 //
296 // We chose to recreate the expanded list, instead of purging
297 // entries from it by iterating over it, to avoid this serial phase
298 // at the end of the cleanup pause.
299 //
300 // The three methods below work as follows:
301 // * reset_for_cleanup_tasks() : Nulls the expanded list head at the
302 // start of the cleanup pause.
303 // * do_cleanup_work() : Called by the cleanup workers for every
304 // region that is not free / is being freed by the cleanup
305 // pause. It creates a list of expanded tables whose head / tail
306 // are on the thread-local SparsePRTCleanupTask object.
307 // * finish_cleanup_task() : Called by the cleanup workers after
308 // they complete their cleanup task. It adds the local list into
309 // the global expanded list. It assumes that the
310 // ParGCRareEvent_lock is being held to ensure MT-safety.
311 static void reset_for_cleanup_tasks();
312 void do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task);
313 static void finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task);
314
315 bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
316 return _next->contains_card(region_id, card_index); // Query the up-to-date table, not the iteration snapshot.
317 }
318 };
319
320 class SparsePRTIter: public RSHashTableIter {
321 public:
322 SparsePRTIter(const SparsePRT* sprt) :
323 RSHashTableIter(sprt->cur()) {} // Iterate the _cur table: the snapshot visible at the start of the pause.
324
325 bool has_next(size_t& card_index) {
326 return RSHashTableIter::has_next(card_index); // Delegates entirely to the base-class iterator.
327 }
328 };
329
330 // This allows each worker during a cleanup pause to create a
331 // thread-local list of sparse tables that have been expanded and need
332 // to be processed at the beginning of the next GC pause. These lists
333 // are concatenated into the single expanded list at the end of the
334 // cleanup pause.
335 class SparsePRTCleanupTask {
336 private:
337 SparsePRT* _head; // First table on this worker's local expanded list (NULL when empty).
338 SparsePRT* _tail; // Last table on the local list; allows O(1) concatenation into the global list.
339
340 public:
341 SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { }
342
343 void add(SparsePRT* sprt); // Append "sprt" to the thread-local list.
344 SparsePRT* head() { return _head; }
345 SparsePRT* tail() { return _tail; }
346 };
347
348 #endif // SHARE_VM_GC_G1_SPARSEPRT_HPP
|
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_SPARSEPRT_HPP
26 #define SHARE_VM_GC_G1_SPARSEPRT_HPP
27
28 #include "gc/g1/g1CollectedHeap.hpp"
29 #include "gc/g1/heapRegion.hpp"
30 #include "gc/shared/cardTableBarrierSet.hpp"
31 #include "memory/allocation.hpp"
32 #include "runtime/mutex.hpp"
33 #include "utilities/align.hpp"
34 #include "utilities/globalDefinitions.hpp"
35
36 // Sparse remembered set for a heap region (the "owning" region). Maps
37 // indices of other regions to short sequences of cards in the other region
38 // that might contain pointers into the owner region.
39
40 class SparsePRTEntry: public CHeapObj<mtGC> {
41 private:
42 // The type of a card entry.
43 typedef uint16_t card_elem_t;
44
45 // We need to make sizeof(SparsePRTEntry) an even multiple of maximum member size,
46 // in order to force correct alignment that could otherwise cause SIGBUS errors
47 // when reading the member variables. This calculates the minimum number of card
48 // array elements required to get that alignment.
49 static const size_t card_array_alignment = sizeof(int) / sizeof(card_elem_t);
50
51 RegionIdx_t _region_ind;
52 int _next_index;
53 int _next_null;
54 // The actual cards stored in this array.
55 // WARNING: Don't put any data members beyond this line. Card array has, in fact, variable length.
56 // It should always be the last data member.
57 card_elem_t _cards[card_array_alignment];
58
59 // Copy the current entry's cards into "cards".
135 // Declares the entry "fi" to be free. (It must have already been
136 // deleted from any bucket lists.)
137 void free_entry(int fi);
138
139 public:
140 RSHashTable(size_t capacity);
141 ~RSHashTable();
142
143 static const int NullEntry = -1;
144
145 bool should_expand() const { return _occupied_entries == _num_entries; }
146
147 // Attempts to ensure that the given card_index in the given region is in
148 // the sparse table. If successful (because the card was already
149 // present, or because it was successfully added) returns "true".
150 // Otherwise, returns "false" to indicate that the addition would
151 // overflow the entry for the region. The caller must transfer these
152 // entries to a larger-capacity representation.
153 bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
154
155 bool delete_entry(RegionIdx_t region_id);
156
157 bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;
158
159 void add_entry(SparsePRTEntry* e);
160
161 SparsePRTEntry* get_entry(RegionIdx_t region_id) const;
162
163 void clear();
164
165 size_t capacity() const { return _capacity; }
166 size_t capacity_mask() const { return _capacity_mask; }
167 size_t occupied_entries() const { return _occupied_entries; }
168 size_t occupied_cards() const { return _occupied_cards; }
169 size_t mem_size() const;
170 // The number of SparsePRTEntry instances available.
171 size_t num_entries() const { return _num_entries; }
172
173 SparsePRTEntry* entry(int i) const {
174 assert(i >= 0 && (size_t)i < _num_entries, "precondition");
195
196 // Computes the proper card index for the card whose offset in the
197 // current region (as indicated by _bl_ind) is "ci".
198 // This is subject to errors when there is iteration concurrent with
199 // modification, but these errors should be benign.
200 size_t compute_card_ind(CardIdx_t ci);
201
202 public:
203 RSHashTableIter(RSHashTable* rsht) :
204 _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
205 _bl_ind(RSHashTable::NullEntry), // No current bucket list yet.
206 _card_ind((SparsePRTEntry::cards_num() - 1)), // Last card slot, so the first advance presumably moves on to a fresh entry.
207 _rsht(rsht) {} // Table to iterate over; not owned by the iterator.
208
209 bool has_next(size_t& card_index);
210 };
211
212 // Concurrent access to a SparsePRT must be serialized by some external mutex.
213
214 class SparsePRTIter;
215
216 class SparsePRT {
217 friend class SparsePRTIter;
218
219 RSHashTable* _table; // The backing hash table; replaced by a larger one on expansion.
220
221 enum SomeAdditionalPrivateConstants {
222 InitialCapacity = 16
223 };
224
225 void expand(); // Grow _table's capacity when it is full (see RSHashTable::should_expand()).
226
227 public:
228 SparsePRT();
229 ~SparsePRT();
230
231 size_t occupied() const { return _table->occupied_cards(); }
232 size_t mem_size() const;
233
234 // Attempts to ensure that the given card_index in the given region is in
235 // the sparse table. If successful (because the card was already
236 // present, or because it was successfully added) returns "true".
237 // Otherwise, returns "false" to indicate that the addition would
238 // overflow the entry for the region. The caller must transfer these
239 // entries to a larger-capacity representation.
240 bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
241
242 // Return the pointer to the entry associated with the given region.
243 SparsePRTEntry* get_entry(RegionIdx_t region_ind);
244
245 // If there is an entry for "region_ind", removes it and returns "true";
246 // otherwise returns "false."
247 bool delete_entry(RegionIdx_t region_ind);
248
249 // Clear the table, and reinitialize to initial capacity.
250 void clear();
251
252 bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
253 return _table->contains_card(region_id, card_index);
254 }
255 };
256
257 class SparsePRTIter: public RSHashTableIter {
258 public:
259 SparsePRTIter(const SparsePRT* sprt) :
260 RSHashTableIter(sprt->_table) { } // Friendship grants access to the private backing table.
261
262 bool has_next(size_t& card_index) {
263 return RSHashTableIter::has_next(card_index); // Delegates entirely to the base-class iterator.
264 }
265 };
266
267 #endif // SHARE_VM_GC_G1_SPARSEPRT_HPP
|