Print this page
rev 2722 : 7095194: G1: HeapRegion::GrainBytes, GrainWords, and CardsPerRegion should be size_t
Summary: Declare GrainBytes, GrainWords, and CardsPerRegion as size_t.
Reviewed-by:
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
+++ new/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
27 27
28 28 #include "gc_implementation/g1/sparsePRT.hpp"
29 29
30 30 // Remembered set for a heap region. Represent a set of "cards" that
31 31 // contain pointers into the owner heap region. Cards are defined somewhat
32 32 // abstractly, in terms of what the "BlockOffsetTable" in use can parse.
33 33
// Forward declarations: these types are referred to only by pointer or
// reference in this header.
class G1CollectedHeap;
class G1BlockOffsetSharedArray;
class HeapRegion;
class HeapRegionRemSetIterator;
class PosParPRT;
class SparsePRT;
40 40
// Essentially a wrapper around SparsePRTCleanupTask (adds no members of
// its own).  See sparsePRT.hpp for more details.
class HRRSCleanupTask : public SparsePRTCleanupTask {
};
45 45
46 46 // The "_coarse_map" is a bitmap with one bit for each region, where set
47 47 // bits indicate that the corresponding region may contain some pointer
48 48 // into the owning region.
49 49
50 50 // The "_fine_grain_entries" array is an open hash table of PerRegionTables
51 51 // (PRTs), indicating regions for which we're keeping the RS as a set of
52 52 // cards. The strategy is to cap the size of the fine-grain table,
53 53 // deleting an entry and setting the corresponding coarse-grained bit when
54 54 // we would overflow this cap.
55 55
56 56 // We use a mixture of locking and lock-free techniques here. We allow
57 57 // threads to locate PRTs without locking, but threads attempting to alter
58 58 // a bucket list obtain a lock. This means that any failing attempt to
59 59 // find a PRT must be retried with the lock. It might seem dangerous that
60 60 // a read can find a PRT that is concurrently deleted. This is all right,
61 61 // because:
62 62 //
63 63 // 1) We only actually free PRT's at safe points (though we reuse them at
64 64 // other times).
// 2) We find PRT's in an attempt to add entries.  If a PRT is deleted,
//    its _coarse_map bit is set, so the card that we were attempting to add
//    is represented.  If a deleted PRT is re-used, a thread adding a bit,
//    thinking the PRT is for a different region, does no harm.
69 69
// Tracks, for one owning heap region, which cards in *other* regions may
// contain pointers into it, at three granularities: a sparse per-region
// table, a fine-grain open-hash table of PerRegionTables, and a coarse
// per-region bitmap (see the discussion above).
class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
  friend class HeapRegionRemSetIterator;

  G1CollectedHeap* _g1h;
  Mutex            _m;
  HeapRegion*      _hr;   // The region that owns this remembered set.

  // These are protected by "_m".
  BitMap      _coarse_map;        // One bit per region; see header comment.
  size_t      _n_coarse_entries;
  static jint _n_coarsenings;     // Global count of fine->coarse demotions.

  PosParPRT** _fine_grain_regions;  // Open hash table of PRTs.
  size_t      _n_fine_entries;

#define SAMPLE_FOR_EVICTION 1
#if SAMPLE_FOR_EVICTION
  // State for sampled eviction of fine-grain entries when the table fills.
  size_t        _fine_eviction_start;
  static size_t _fine_eviction_stride;
  static size_t _fine_eviction_sample_size;
#endif

  SparsePRT _sparse_table;

  // These are static after init.
  static size_t _max_fine_entries;
  static size_t _mod_max_fine_entries_mask;

  // Requires "prt" to be the first element of the bucket list appropriate
  // for "hr".  If this list contains an entry for "hr", return it,
  // otherwise return "NULL".
  PosParPRT* find_region_table(size_t ind, HeapRegion* hr) const;

  // Find, delete, and return a candidate PosParPRT, if any exists,
  // adding the deleted region to the coarse bitmap.  Requires the caller
  // to hold _m, and the fine-grain table to be full.
  PosParPRT* delete_region_table();

  // If a PRT for "hr" is in the bucket list indicated by "ind" (which must
  // be the correct index for "hr"), delete it and return true; else return
  // false.
  bool del_single_region_table(size_t ind, HeapRegion* hr);

  // Statistics for the from-card cache below.
  static jint _cache_probes;
  static jint _cache_hits;

  // Indexed by thread X heap region, to minimize thread contention.
  static int** _from_card_cache;
  static size_t _from_card_cache_max_regions;
  static size_t _from_card_cache_mem_size;

public:
  OtherRegionsTable(HeapRegion* hr);

  HeapRegion* hr() const { return _hr; }

  // Record that the card containing "from" holds a pointer into the owning
  // region.  "tid" selects the per-thread from-card cache slot.
  // For now.  Could "expand" some tables in the future, so that this made
  // sense.
  void add_reference(OopOrNarrowOopStar from, int tid);

  // Sequential (single-threaded) variant; uses thread id 0.
  void add_reference(OopOrNarrowOopStar from) {
    return add_reference(from, 0);
  }

  // Removes any entries shown by the given bitmaps to contain only dead
  // objects.
  void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);

  // Card-occupancy counts, total and per backing table.
  // NOTE(review): the original comment said "Not const because it takes a
  // lock", yet these are declared const -- they appear to take "_m"
  // internally despite constness; confirm against the .cpp.
  size_t occupied() const;
  size_t occ_fine() const;
  size_t occ_coarse() const;
  size_t occ_sparse() const;

  static jint n_coarsenings() { return _n_coarsenings; }

  // Returns size in bytes.
  // NOTE(review): declared const although it takes a lock internally
  // (original comment said "not const").
  size_t mem_size() const;
  static size_t static_mem_size();
  static size_t fl_mem_size();

  bool contains_reference(OopOrNarrowOopStar from) const;
  bool contains_reference_locked(OopOrNarrowOopStar from) const;

  void clear();

  // Specifically clear the from_card_cache.
  void clear_fcc();

  // "from_hr" is being cleared; remove any entries from it.
  void clear_incoming_entry(HeapRegion* from_hr);

  void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);

  // Declare the heap size (in # of regions) to the OtherRegionsTable.
  // (Uses it to initialize from_card_cache).
  static void init_from_card_cache(size_t max_regions);

  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
  // Make sure any entries for higher regions are invalid.
  static void shrink_from_card_cache(size_t new_n_regs);

  static void print_from_card_cache();
};
175 175
176 176 class HeapRegionRemSet : public CHeapObj {
177 177 friend class VMStructs;
178 178 friend class HeapRegionRemSetIterator;
179 179
180 180 public:
181 181 enum Event {
182 182 Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd
183 183 };
184 184
185 185 private:
186 186 G1BlockOffsetSharedArray* _bosa;
187 187 G1BlockOffsetSharedArray* bosa() const { return _bosa; }
188 188
189 189 OtherRegionsTable _other_regions;
190 190
191 191 enum ParIterState { Unclaimed, Claimed, Complete };
192 192 volatile ParIterState _iter_state;
193 193 volatile jlong _iter_claimed;
194 194
195 195 // Unused unless G1RecordHRRSOops is true.
196 196
197 197 static const int MaxRecorded = 1000000;
198 198 static OopOrNarrowOopStar* _recorded_oops;
199 199 static HeapWord** _recorded_cards;
200 200 static HeapRegion** _recorded_regions;
201 201 static int _n_recorded;
202 202
203 203 static const int MaxRecordedEvents = 1000;
204 204 static Event* _recorded_events;
205 205 static int* _recorded_event_index;
206 206 static int _n_recorded_events;
207 207
208 208 static void print_event(outputStream* str, Event evnt);
209 209
210 210 public:
211 211 HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
212 212 HeapRegion* hr);
213 213
214 214 static int num_par_rem_sets();
215 215 static void setup_remset_size();
216 216
217 217 HeapRegion* hr() const {
218 218 return _other_regions.hr();
219 219 }
220 220
221 221 size_t occupied() const {
222 222 return _other_regions.occupied();
223 223 }
224 224 size_t occ_fine() const {
225 225 return _other_regions.occ_fine();
226 226 }
227 227 size_t occ_coarse() const {
228 228 return _other_regions.occ_coarse();
229 229 }
230 230 size_t occ_sparse() const {
231 231 return _other_regions.occ_sparse();
232 232 }
233 233
234 234 static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); }
235 235
236 236 /* Used in the sequential case. Returns "true" iff this addition causes
237 237 the size limit to be reached. */
238 238 void add_reference(OopOrNarrowOopStar from) {
239 239 _other_regions.add_reference(from);
240 240 }
241 241
242 242 /* Used in the parallel case. Returns "true" iff this addition causes
243 243 the size limit to be reached. */
244 244 void add_reference(OopOrNarrowOopStar from, int tid) {
245 245 _other_regions.add_reference(from, tid);
246 246 }
247 247
248 248 // Removes any entries shown by the given bitmaps to contain only dead
249 249 // objects.
250 250 void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
251 251
252 252 // The region is being reclaimed; clear its remset, and any mention of
253 253 // entries for this region in other remsets.
254 254 void clear();
255 255
256 256 // Forget any entries due to pointers from "from_hr".
257 257 void clear_incoming_entry(HeapRegion* from_hr) {
258 258 _other_regions.clear_incoming_entry(from_hr);
259 259 }
260 260
261 261 #if 0
262 262 virtual void cleanup() = 0;
263 263 #endif
264 264
265 265 // Attempt to claim the region. Returns true iff this call caused an
266 266 // atomic transition from Unclaimed to Claimed.
267 267 bool claim_iter();
268 268 // Sets the iteration state to "complete".
269 269 void set_iter_complete();
270 270 // Returns "true" iff the region's iteration is complete.
271 271 bool iter_is_complete();
272 272
273 273 // Support for claiming blocks of cards during iteration
274 274 size_t iter_claimed() const { return (size_t)_iter_claimed; }
275 275 // Claim the next block of cards
276 276 size_t iter_claimed_next(size_t step) {
277 277 size_t current, next;
278 278 do {
279 279 current = iter_claimed();
280 280 next = current + step;
281 281 } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
282 282 return current;
283 283 }
284 284 void reset_for_par_iteration();
285 285
286 286 bool verify_ready_for_par_iteration() {
287 287 return (_iter_state == Unclaimed) && (_iter_claimed == 0);
288 288 }
289 289
290 290 // Initialize the given iterator to iterate over this rem set.
291 291 void init_iterator(HeapRegionRemSetIterator* iter) const;
292 292
293 293 #if 0
294 294 // Apply the "do_card" method to the start address of every card in the
295 295 // rem set. Returns false if some application of the closure aborted.
296 296 virtual bool card_iterate(CardClosure* iter) = 0;
297 297 #endif
298 298
299 299 // The actual # of bytes this hr_remset takes up.
300 300 size_t mem_size() {
301 301 return _other_regions.mem_size()
302 302 // This correction is necessary because the above includes the second
303 303 // part.
304 304 + sizeof(this) - sizeof(OtherRegionsTable);
305 305 }
306 306
307 307 // Returns the memory occupancy of all static data structures associated
308 308 // with remembered sets.
309 309 static size_t static_mem_size() {
310 310 return OtherRegionsTable::static_mem_size();
311 311 }
312 312
313 313 // Returns the memory occupancy of all free_list data structures associated
314 314 // with remembered sets.
315 315 static size_t fl_mem_size() {
316 316 return OtherRegionsTable::fl_mem_size();
317 317 }
318 318
319 319 bool contains_reference(OopOrNarrowOopStar from) const {
320 320 return _other_regions.contains_reference(from);
321 321 }
322 322 void print() const;
323 323
324 324 // Called during a stop-world phase to perform any deferred cleanups.
325 325 // The second version may be called by parallel threads after then finish
326 326 // collection work.
327 327 static void cleanup();
328 328 static void par_cleanup();
329 329
330 330 // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
331 331 // (Uses it to initialize from_card_cache).
332 332 static void init_heap(size_t max_regions) {
333 333 OtherRegionsTable::init_from_card_cache(max_regions);
334 334 }
335 335
336 336 // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
337 337 static void shrink_heap(size_t new_n_regs) {
338 338 OtherRegionsTable::shrink_from_card_cache(new_n_regs);
339 339 }
340 340
341 341 #ifndef PRODUCT
342 342 static void print_from_card_cache() {
343 343 OtherRegionsTable::print_from_card_cache();
344 344 }
345 345 #endif
346 346
347 347 static void record(HeapRegion* hr, OopOrNarrowOopStar f);
348 348 static void print_recorded();
349 349 static void record_event(Event evnt);
350 350
351 351 // These are wrappers for the similarly-named methods on
352 352 // SparsePRT. Look at sparsePRT.hpp for more details.
353 353 static void reset_for_cleanup_tasks();
354 354 void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
355 355 static void finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task);
356 356
357 357 // Run unit tests.
358 358 #ifndef PRODUCT
359 359 static void test();
360 360 #endif
361 361 };
362 362
363 363 class HeapRegionRemSetIterator : public CHeapObj {
364 364
365 365 // The region over which we're iterating.
366 366 const HeapRegionRemSet* _hrrs;
367 367
368 368 // Local caching of HRRS fields.
369 369 const BitMap* _coarse_map;
370 370 PosParPRT** _fine_grain_regions;
371 371
372 372 G1BlockOffsetSharedArray* _bosa;
373 373 G1CollectedHeap* _g1h;
374 374
375 375 // The number yielded since initialization.
376 376 size_t _n_yielded_fine;
377 377 size_t _n_yielded_coarse;
378 378 size_t _n_yielded_sparse;
379 379
380 380 // If true we're iterating over the coarse table; if false the fine
381 381 // table.
382 382 enum IterState {
383 383 Sparse,
384 384 Fine,
385 385 Coarse
386 386 };
387 387 IterState _is;
↓ open down ↓ |
387 lines elided |
↑ open up ↑ |
388 388
389 389 // In both kinds of iteration, heap offset of first card of current
390 390 // region.
391 391 size_t _cur_region_card_offset;
392 392 // Card offset within cur region.
393 393 size_t _cur_region_cur_card;
394 394
395 395 // Coarse table iteration fields:
396 396
397 397 // Current region index;
398 - int _coarse_cur_region_index;
399 - int _coarse_cur_region_cur_card;
398 + int _coarse_cur_region_index;
399 + size_t _coarse_cur_region_cur_card;
400 400
401 401 bool coarse_has_next(size_t& card_index);
402 402
403 403 // Fine table iteration fields:
404 404
405 405 // Index of bucket-list we're working on.
406 406 int _fine_array_index;
407 407 // Per Region Table we're doing within current bucket list.
408 408 PosParPRT* _fine_cur_prt;
409 409
410 410 /* SparsePRT::*/ SparsePRTIter _sparse_iter;
411 411
412 412 void fine_find_next_non_null_prt();
413 413
414 414 bool fine_has_next();
415 415 bool fine_has_next(size_t& card_index);
416 416
417 417 public:
418 418 // We require an iterator to be initialized before use, so the
419 419 // constructor does little.
420 420 HeapRegionRemSetIterator();
421 421
422 422 void initialize(const HeapRegionRemSet* hrrs);
423 423
424 424 // If there remains one or more cards to be yielded, returns true and
425 425 // sets "card_index" to one of those cards (which is then considered
426 426 // yielded.) Otherwise, returns false (and leaves "card_index"
427 427 // undefined.)
428 428 bool has_next(size_t& card_index);
429 429
430 430 size_t n_yielded_fine() { return _n_yielded_fine; }
431 431 size_t n_yielded_coarse() { return _n_yielded_coarse; }
432 432 size_t n_yielded_sparse() { return _n_yielded_sparse; }
433 433 size_t n_yielded() {
434 434 return n_yielded_fine() + n_yielded_coarse() + n_yielded_sparse();
435 435 }
436 436 };
437 437
#if 0
// Dead code: closure applied to the start address of every card in a rem
// set.  Compiled out (see the matching "#if 0" block in HeapRegionRemSet).
class CardClosure: public Closure {
public:
  virtual void do_card(HeapWord* card_start) = 0;
};

#endif
445 445
446 446 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
↓ open down ↓ |
37 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX