#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)cardTableModRefBS.cpp 1.60 07/12/05 23:34:34 JVM"
#endif
/*
 * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}
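
// Illustrative example (addresses made up): with _committed[0] covering
// [0x1000, 0x3000), _committed[1] covering [0x2000, 0x4000), and the guard
// region elsewhere, committed_unique_to_self(0, _committed[0]) subtracts the
// overlap with _committed[1] and returns [0x1000, 0x2000), i.e. the pages
// that only region 0 is responsible for committing and uncommitting.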

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          assert((new_end_aligned >= _committed[ri].start()) &&
                 (_committed[ri].start() > _committed[ind].start()),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          assert(new_end_aligned > _committed[ind].start(),
                 "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    HeapWord* const new_end_for_commit = MIN2(new_end_aligned,
                                              _guard_region.start());

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(),
                              "card table expansion");
      }
      // Use new_end_aligned (as opposed to new_end_for_commit) because
      // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        if (!os::uncommit_memory((char*)uncommit_region.start(),
                                 uncommit_region.byte_size())) {
          assert(false, "Card table contraction failed");
          // The call failed so don't change the end of the
          // committed region.  This is better than taking the
          // VM down.
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                           "  _covered[%d].start(): " INTPTR_FORMAT
                           "  _covered[%d].last(): " INTPTR_FORMAT,
                           ind, _covered[ind].start(),
                           ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                           "  _committed[%d].start(): " INTPTR_FORMAT
                           "  _committed[%d].last(): " INTPTR_FORMAT,
                           ind, _committed[ind].start(),
                           ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                           "  byte_for(start): " INTPTR_FORMAT
                           "  byte_for(last): " INTPTR_FORMAT,
                           byte_for(_covered[ind].start()),
                           byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                           "  addr_for(start): " INTPTR_FORMAT
                           "  addr_for(last): " INTPTR_FORMAT,
                           addr_for((jbyte*) _committed[ind].start()),
                           addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}
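
// Resize walk-through (numbers illustrative): if a generation grows from
// 1 MB to 2 MB, resize_covered_region() receives the old start with the new
// 2 MB size, commits any card-table pages between the old committed end and
// the page-aligned new end (clamped at the guard page and at any neighboring
// committed region), cleans the cards that now map the expansion, and
// finally records the new covered size.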

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

/*
   Claimed and deferred bits are used together in G1 during the evacuation
   pause. These bits can have the following state transitions:
   1. The claimed bit can be put over any other card state. Except that
      the "dirty -> dirty and claimed" transition is checked for in
      G1 code and is not used.
   2. The deferred bit can be set only if the previous state of the card
      was either clean or claimed. mark_card_deferred() is wait-free.
      We do not care whether the operation succeeds; if it does not, the
      only consequence is a duplicate entry in the update buffer (the
      "cache miss" case), so it is not worth spinning.
 */


bool CardTableModRefBS::claim_card(size_t card_index) {
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}
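
// The CAS loop above retries with the freshly observed value until either
// this thread installs the claimed bit (returns true) or another thread is
// seen to have claimed the card first, at which point the loop condition
// fails and we return false.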

bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // The card has already been processed.
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }
  // The deferred ("cached") bit can be installed either on a clean card
  // or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}
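
// Note that the result of the cmpxchg is deliberately ignored here: per the
// state-transition comment above, losing the race at worst produces a
// duplicate update-buffer entry, so no retry loop is needed.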


void CardTableModRefBS::non_clean_card_iterate(Space* sp,
                                               MemRegion mr,
                                               DirtyCardToOopClosure* dcto_cl,
                                               MemRegionClosure* cl,
                                               bool clear) {
  if (!mr.is_empty()) {
    int n_threads = SharedHeap::heap()->n_par_threads();
    if (n_threads > 0) {
#ifndef SERIALGC
      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
#else  // SERIALGC
      fatal("Parallel gc not supported here.");
#endif // SERIALGC
    } else {
      non_clean_card_iterate_work(mr, cl, clear);
    }
  }
}

  // corresponding "delete".
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
  MemRegion used_mr;
  if (before_save_marks) {
    used_mr = sp->used_region_at_save_marks();
  } else {
    used_mr = sp->used_region();
  }
  non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}
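
// For example (card boundaries illustrative): if mr starts halfway through a
// card, byte_after(mr.start() - 1) rounds up to the first card that lies
// entirely inside mr, so a card shared with memory outside mr is never
// cleaned.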

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}
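
// Unlike clear_MemRegion() above, dirty() marks every card that overlaps mr,
// including cards only partially covered; a spuriously dirty card merely
// causes extra scanning, whereas a spuriously clean card would lose updates.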

// NOTES:
// (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
//     iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}
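
// For example (card indices illustrative): if cards 5..8 are dirty and card
// 9 is clean, the closure is invoked once with a MemRegion spanning
// 4 * card_size_in_words words starting at addr_for(card 5), and the outer
// scan resumes at card 9.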

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}
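
// Callers detect "no dirty range found" via the empty MemRegion(mr.end(),
// mr.end()) sentinel returned above; any non-empty result is the first
// maximal dirty range, already reset to reset_val when reset is true.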

// Set all the dirty cards in the given region to "precleaned" state.
void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry++) {
        if (*cur_entry == dirty_card) {
          *cur_entry = precleaned_card;
        }
      }
    }
  }
}