#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/virtualspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"

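// Scans the non-clean cards of 'mr' in parallel. The region is carved into
// strides of card chunks; each worker thread repeatedly claims a stride via
// the space's SequentialSubTasksDone object until all strides are claimed,
// and the per-chunk lowest_non_clean (LNC) array coordinates threads whose
// chunks share an object straddling their mutual boundary.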
void CardTableModRefBSForCTRS::
non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                     OopsInGenClosure* cl,
                                     CardTableRS* ct,
                                     uint n_threads) {
  assert(n_threads > 0, "expected n_threads > 0");
  assert(n_threads <= ParallelGCThreads,
         "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);

  // Make sure the LNC array is valid for the space.
  jbyte**   lowest_non_clean;
  uintptr_t lowest_non_clean_base_chunk_index;
  size_t    lowest_non_clean_chunk_size;
  get_LNC_array_for_space(sp, lowest_non_clean,
                          lowest_non_clean_base_chunk_index,
                          lowest_non_clean_chunk_size);

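  // The region is split into n_strides strides; a stride owns every
  // n_strides-th chunk of ParGCCardsPerStrideChunk cards, so claimed work
  // interleaves across the region rather than forming contiguous blocks.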
  uint n_strides = n_threads * ParGCStridesPerThread;
  SequentialSubTasksDone* pst = sp->par_seq_tasks();
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks(n_strides);

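  // Claim strides until none remain. is_task_claimed() stores the index of
  // the newly claimed task into 'stride', which is passed by reference.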
  uint stride = 0;
  while (!pst->is_task_claimed(/* reference */ stride)) {
    process_stride(sp, mr, stride, n_strides,
                   cl, ct,
                   lowest_non_clean,
                   lowest_non_clean_base_chunk_index,
                   lowest_non_clean_chunk_size);
  }
  if (pst->all_tasks_completed()) {
    // Clear lowest_non_clean array for next time.
    intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
    uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());
    for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
      intptr_t ind = ch - lowest_non_clean_base_chunk_index;
      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
             "Bounds error");
      lowest_non_clean[ind] = NULL;
    }
  }
}

void
CardTableModRefBSForCTRS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               OopsInGenClosure* cl,
               CardTableRS* ct,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t lowest_non_clean_chunk_size) {
  // We could go from higher to lower addresses here; it wouldn't help
  // much, though, because of the strided parallelism pattern used here.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte* start_card = byte_for(used.start());
  jbyte* end_card = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                ParGCCardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                ParGCCardsPerStrideChunk);
  }
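  // For illustration (numbers assumed, not defaults): with n_strides = 4 and
  // ParGCCardsPerStrideChunk = 256, stride 2 starts at the first chunk at or
  // after start_card whose chunk index is congruent to 2 mod 4, and then
  // advances by 4 * 256 cards per iteration of the loop below.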

  while (chunk_card_start < end_card) {
    // Even though we go from lower to higher addresses below, the
    // strided parallelism can interleave the actual processing of the
    // dirty pages in various ways. For a specific chunk within this
    // stride, we take care to avoid double scanning or missing a card
    // by suitably initializing the "min_done" field in process_chunk_boundaries()
    // below, together with the dirty region extension accomplished in
    // DirtyCardToOopClosure::do_MemRegion().
    jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
    // Invariant: chunk_mr should be fully contained within the "used" region.
    MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
                                   chunk_card_end >= end_card ?
                                     used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    // This function is used by the parallel card table iteration.
    const bool parallel = true;

    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                     cl->gen_boundary(),
                                                     parallel);
    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    // We want the LNC array updates above in process_chunk_boundaries
    // to be visible before any of the card table value changes as a
    // result of the dirty card iteration below.
    OrderAccess::storestore();

    // We want to clear the cards: clear_cl here does the work of finding
    // contiguous dirty ranges of cards to process and clear.
    clear_cl.do_MemRegion(chunk_mr);

    // Find the next chunk of the stride.
    chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
  }
}

void
CardTableModRefBSForCTRS::
process_chunk_boundaries(Space* sp,
                         DirtyCardToOopClosure* dcto_cl,
                         MemRegion chunk_mr,
                         MemRegion used,
                         jbyte** lowest_non_clean,
                         uintptr_t lowest_non_clean_base_chunk_index,
                         size_t lowest_non_clean_chunk_size)
{
  // We must worry about non-array objects that cross chunk boundaries,
  // because such objects are both precisely and imprecisely marked:
  // .. if the head of such an object is dirty, the entire object
  //    needs to be scanned, under the interpretation that this
  //    was an imprecise mark
  // .. if the head of such an object is not dirty, we can assume
  //    precise marking and it's efficient to scan just the dirty
  //    cards.
  // In either case, each scanned reference must be scanned precisely
  // once so as to avoid cloning of a young referent. For efficiency,
  // our closures depend on this property and do not protect against
  // double scans.

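  // Translate this chunk's global index into an index into the LNC array,
  // whose entry 0 corresponds to lowest_non_clean_base_chunk_index.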
  uintptr_t start_chunk_index = addr_to_chunk_index(chunk_mr.start());
  assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error.");
  uintptr_t cur_chunk_index = start_chunk_index - lowest_non_clean_base_chunk_index;

  // First, set "our" lowest_non_clean entry, which would be
  // used by the thread scanning an adjoining left chunk with
  // a non-array object straddling the mutual boundary.
  // Find the object that spans our boundary, if one exists.
  // first_block is the block possibly straddling our left boundary.
  HeapWord* first_block = sp->block_start(chunk_mr.start());
  assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()),
         "First chunk should always have a co-initial block");
  // Does the block straddle the chunk's left boundary, and is it
  // a non-array object?
  if (first_block < chunk_mr.start()        // first block straddles left bdry
      && sp->block_is_obj(first_block)      // first block is an object
      && !(oop(first_block)->is_objArray()  // first block is not an array (arrays are precisely dirtied)
           || oop(first_block)->is_typeArray())) {
    // Find our least non-clean card, so that a left neighbor
    // does not scan an object straddling the mutual boundary
    // too far to the right, and attempt to scan a portion of
    // that object twice.
    jbyte* first_dirty_card = NULL;
    jbyte* last_card_of_first_obj =
        byte_for(first_block + sp->block_size(first_block) - 1);
    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
    jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
    jbyte* last_card_to_check =
        (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
                      (intptr_t) last_card_of_first_obj);
    // Note that this does not need to go beyond our last card
    // if our first object completely straddles this chunk.
    for (jbyte* cur = first_card_of_cur_chunk;
         cur <= last_card_to_check; cur++) {
      jbyte val = *cur;
      if (card_will_be_scanned(val)) {
        first_dirty_card = cur; break;
      } else {
        assert(!card_may_have_been_dirty(val), "Error");
      }
    }
    if (first_dirty_card != NULL) {
      assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error.");
      assert(lowest_non_clean[cur_chunk_index] == NULL,
             "Write exactly once : value should be stable hereafter for this round");
      lowest_non_clean[cur_chunk_index] = first_dirty_card;
    }
  } else {
    // In this case we can help our neighbor by just asking them
    // to stop at our first card (even though it may not be dirty).
    assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
    lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
  }

  // Next, set our own max_to_do, which will strictly/exclusively bound
  // the highest address that we will scan past the right end of our chunk.
  HeapWord* max_to_do = NULL;
  if (chunk_mr.end() < used.end()) {
    // This is not the last chunk in the used region.
    // What is our last block? We check the first block of
    // the next (right) chunk rather than strictly check our last block
    // because it's potentially more efficient to do so.
    HeapWord* const last_block = sp->block_start(chunk_mr.end());
    assert(last_block <= chunk_mr.end(), "In case this property changes.");
    if ((last_block == chunk_mr.end())      // our last block does not straddle boundary
        || !sp->block_is_obj(last_block)    // last_block isn't an object
        || oop(last_block)->is_objArray()   // last_block is an array (precisely marked)
        || oop(last_block)->is_typeArray()) {
      max_to_do = chunk_mr.end();
    } else {
      assert(last_block < chunk_mr.end(), "Tautology");
      // It is a non-array object that straddles the right boundary of this chunk.
      // last_obj_card is the card corresponding to the start of the last object
      // in the chunk. Note that the last object may not start in
      // the chunk.
      jbyte* const last_obj_card = byte_for(last_block);
      const jbyte val = *last_obj_card;
      if (!card_will_be_scanned(val)) {
        assert(!card_may_have_been_dirty(val), "Error");
        // The card containing the head is not dirty. Any marks on
        // subsequent cards still in this chunk must have been made
        // precisely; we can cap processing at the end of our chunk.
        max_to_do = chunk_mr.end();
      } else {
        // The last object must be considered dirty, and extends onto the
        // following chunk. Look for a dirty card in that chunk that will
        // bound our processing.
        jbyte* limit_card = NULL;
        const size_t last_block_size = sp->block_size(last_block);
        jbyte* const last_card_of_last_obj =
            byte_for(last_block + last_block_size - 1);
        jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end());
        // This search potentially goes a long distance looking
        // for the next card that will be scanned, terminating
        // at the end of the last_block, if no earlier dirty card
        // is found.
        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
               "last card of next chunk may be wrong");
        for (jbyte* cur = first_card_of_next_chunk;
             cur <= last_card_of_last_obj; cur++) {
          const jbyte val = *cur;
          if (card_will_be_scanned(val)) {
            limit_card = cur; break;
          } else {
            assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
          }
        }
        if (limit_card != NULL) {
          max_to_do = addr_for(limit_card);
          assert(limit_card != NULL && max_to_do != NULL, "Error");
        } else {
          // The following is a pessimistic value, because it's possible
          // that a dirty card on a subsequent chunk has been cleared by
          // the time we get to look at it; we'll correct for that further below,
          // using the LNC array which records the least non-clean card
          // before cards were cleared in a particular chunk.
          limit_card = last_card_of_last_obj;
          max_to_do = last_block + last_block_size;
          assert(limit_card != NULL && max_to_do != NULL, "Error");
        }
        assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
               "Bounds error.");

        // The dirty card we settled on above may have been cleared (after
        // being recorded in the LNC array) by the thread that owns that
        // chunk. Consult the LNC entries for the chunks spanned by the last
        // object to see whether limit_card can be tightened; the range of
        // chunks to check is clamped to the last chunk of the used region
        // just below, in case the heap expanded after the LNC array was sized.
        uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
                                              - lowest_non_clean_base_chunk_index;
        const uintptr_t last_chunk_index    = addr_to_chunk_index(used.last())
                                              - lowest_non_clean_base_chunk_index;
        if (last_chunk_index_to_check > last_chunk_index) {
          assert(last_block + last_block_size > used.end(),
                 "Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
                 " does not exceed used.end() = " PTR_FORMAT ","
                 " yet last_chunk_index_to_check " INTPTR_FORMAT
                 " exceeds last_chunk_index " INTPTR_FORMAT,
                 p2i(last_block), p2i(last_block + last_block_size),
                 p2i(used.end()),
                 last_chunk_index_to_check, last_chunk_index);
          assert(sp->used_region().end() > used.end(),
                 "Expansion did not happen: "
                 "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
                 p2i(sp->used_region().start()), p2i(sp->used_region().end()),
                 p2i(used.start()), p2i(used.end()));
          last_chunk_index_to_check = last_chunk_index;
        }
        for (uintptr_t lnc_index = cur_chunk_index + 1;
             lnc_index <= last_chunk_index_to_check;
             lnc_index++) {
          jbyte* lnc_card = lowest_non_clean[lnc_index];
          if (lnc_card != NULL) {
            // we can stop at the first non-NULL entry we find
            if (lnc_card <= limit_card) {
              limit_card = lnc_card;
              max_to_do = addr_for(limit_card);
              assert(limit_card != NULL && max_to_do != NULL, "Error");
            }
            // In any case, we break now
            break;
          }  // else continue to look for a non-NULL entry if any
        }
        assert(limit_card != NULL && max_to_do != NULL, "Error");
      }
      assert(max_to_do != NULL, "OOPS 1 !");
    }
    assert(max_to_do != NULL, "OOPS 2!");
  } else {
    max_to_do = used.end();
  }
  assert(max_to_do != NULL, "OOPS 3!");
  // Now we can set the closure we're using so that it doesn't go beyond
  // max_to_do.
  dcto_cl->set_min_done(max_to_do);
#ifndef PRODUCT
  dcto_cl->set_last_bottom(max_to_do);
#endif
}

void
CardTableModRefBSForCTRS::
get_LNC_array_for_space(Space* sp,
                        jbyte**& lowest_non_clean,
                        uintptr_t& lowest_non_clean_base_chunk_index,
                        size_t& lowest_non_clean_chunk_size) {

  int i = find_covering_region_containing(sp->bottom());
  MemRegion covered = _covered[i];
  size_t n_chunks = chunks_to_cover(covered);
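  // One LNC entry is needed per ParGCCardsPerStrideChunk-sized chunk of
  // cards covering the region (see chunks_to_cover()).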
383
384 // Only the first thread to obtain the lock will resize the
385 // LNC array for the covered region. Any later expansion can't affect
386 // the used_at_save_marks region.
387 // (I observed a bug in which the first thread to execute this would
388 // resize, and then it would cause "expand_and_allocate" that would
389 // increase the number of chunks in the covered region. Then a second
390 // thread would come and execute this, see that the size didn't match,
391 // and free and allocate again. So the first thread would be using a
392 // freed "_lowest_non_clean" array.)
393
394 // Do a dirty read here. If we pass the conditional then take the rare
395 // event lock and do the read again in case some other thread had already
396 // succeeded and done the resize.