
src/hotspot/share/gc/shared/cardTableRS.cpp


*** 110,126 ****
      // the inline barrier is correct.
      set_cur_youngergen_card_val(youngergen_card);
    }
  }
  
! void CardTableRS::younger_refs_iterate(Generation* g,
!                                        OopsInGenClosure* blk,
!                                        uint n_threads) {
    // The indexing in this array is slightly odd. We want to access
    // the old generation record here, which is at index 2.
    _last_cur_val_in_gen[2] = cur_youngergen_card_val();
-   g->younger_refs_iterate(blk, n_threads);
  }
  
  inline bool ClearNoncleanCardWrapper::clear_card(CardValue* entry) {
    if (_is_par) {
      return clear_card_parallel(entry);
--- 110,123 ----
      // the inline barrier is correct.
      set_cur_youngergen_card_val(youngergen_card);
    }
  }
  
! void CardTableRS::at_younger_refs_iterate() {
    // The indexing in this array is slightly odd. We want to access
    // the old generation record here, which is at index 2.
    _last_cur_val_in_gen[2] = cur_youngergen_card_val();
  }
  
  inline bool ClearNoncleanCardWrapper::clear_card(CardValue* entry) {
    if (_is_par) {
      return clear_card_parallel(entry);
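With the Generation and closure arguments dropped, at_younger_refs_iterate() now only records the current younger-gen card value, so the per-space walk is presumably driven by the caller through younger_refs_in_space_iterate() (see the next hunk). A minimal sketch of that calling sequence, assuming both members are reachable from the caller; scan_old_to_young, old_space, boundary and cl are hypothetical names, not identifiers from this change:

    // Sketch only: illustrates the expected calling order after this change.
    static void scan_old_to_young(CardTableRS* rs, Space* old_space,
                                  HeapWord* boundary,   // hypothetical: start of the old generation
                                  OopIterateClosure* cl, uint n_threads) {
      rs->at_younger_refs_iterate();   // records cur_youngergen_card_val() in the old-gen slot
      rs->younger_refs_in_space_iterate(old_space, boundary, cl, n_threads);
    }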
*** 243,258 ****
      _dirty_card_closure->do_MemRegion(mrd);
    }
  }
  
  void CardTableRS::younger_refs_in_space_iterate(Space* sp,
!                                                 OopsInGenClosure* cl,
                                                  uint n_threads) {
    verify_used_region_at_save_marks(sp);
  
    const MemRegion urasm = sp->used_region_at_save_marks();
!   non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
  }
  
  #ifdef ASSERT
  void CardTableRS::verify_used_region_at_save_marks(Space* sp) const {
    MemRegion ur = sp->used_region();
--- 240,256 ----
      _dirty_card_closure->do_MemRegion(mrd);
    }
  }
  
  void CardTableRS::younger_refs_in_space_iterate(Space* sp,
!                                                 HeapWord* gen_boundary,
!                                                 OopIterateClosure* cl,
                                                  uint n_threads) {
    verify_used_region_at_save_marks(sp);
  
    const MemRegion urasm = sp->used_region_at_save_marks();
!   non_clean_card_iterate_possibly_parallel(sp, gen_boundary, urasm, cl, this, n_threads);
  }
  
  #ifdef ASSERT
  void CardTableRS::verify_used_region_at_save_marks(Space* sp) const {
    MemRegion ur = sp->used_region();
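Since the signature now takes a plain OopIterateClosure, the closure no longer has to be an OopsInGenClosure carrying its own gen_boundary(); the boundary arrives through the new gen_boundary parameter instead. A minimal sketch of such a closure, assuming HotSpot's BasicOopIterateClosure base in memory/iterator.hpp; CountOopsClosure is purely illustrative:

    #include "memory/iterator.hpp"

    // Illustrative only: a plain OopIterateClosure with no generation
    // boundary of its own; the boundary is supplied by the caller of
    // younger_refs_in_space_iterate().
    class CountOopsClosure : public BasicOopIterateClosure {
      size_t _count;
    public:
      CountOopsClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { _count++; }
      virtual void do_oop(narrowOop* p) { _count++; }
      size_t count() const { return _count; }
    };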
*** 622,633 ****
            CardTableRS::youngergen_may_have_been_dirty(cv));
  }
  
  void CardTableRS::non_clean_card_iterate_possibly_parallel(
    Space* sp,
    MemRegion mr,
!   OopsInGenClosure* cl,
    CardTableRS* ct,
    uint n_threads)
  {
    if (!mr.is_empty()) {
      if (n_threads > 0) {
--- 620,632 ----
            CardTableRS::youngergen_may_have_been_dirty(cv));
  }
  
  void CardTableRS::non_clean_card_iterate_possibly_parallel(
    Space* sp,
+   HeapWord* gen_boundary,
    MemRegion mr,
!   OopIterateClosure* cl,
    CardTableRS* ct,
    uint n_threads)
  {
    if (!mr.is_empty()) {
      if (n_threads > 0) {
*** 636,655 ****
        // clear_cl finds contiguous dirty ranges of cards to process and clear.
  
        // This is the single-threaded version used by DefNew.
        const bool parallel = false;
  
!       DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
        ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
  
        clear_cl.do_MemRegion(mr);
      }
    }
  }
  
  void CardTableRS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
!                                                        OopsInGenClosure* cl, CardTableRS* ct,
                                                         uint n_threads) {
    fatal("Parallel gc not supported here.");
  }
  
  bool CardTableRS::is_in_young(oop obj) const {
--- 635,654 ----
        // clear_cl finds contiguous dirty ranges of cards to process and clear.
  
        // This is the single-threaded version used by DefNew.
        const bool parallel = false;
  
!       DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), gen_boundary, parallel);
        ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
  
        clear_cl.do_MemRegion(mr);
      }
    }
  }
  
  void CardTableRS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
!                                                        OopIterateClosure* cl, CardTableRS* ct,
                                                         uint n_threads) {
    fatal("Parallel gc not supported here.");
  }
  
  bool CardTableRS::is_in_young(oop obj) const {
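The last two hunks thread the boundary through to the DirtyCardToOopClosure: it is now the explicit gen_boundary argument handed to Space::new_dcto_cl(), not a value read back from the closure via cl->gen_boundary(). A hedged sketch of the serial path as a standalone helper, assuming new_dcto_cl() keeps the (closure, precision, boundary, parallel) shape shown above and that CardTable::PrecisionStyle is the precision type; the helper itself and its parameters are hypothetical:

    // Sketch only; mirrors the single-threaded branch above.
    static void scan_dirty_cards_serial(Space* sp, MemRegion mr,
                                        HeapWord* gen_boundary,
                                        OopIterateClosure* cl,
                                        CardTableRS* ct,
                                        CardTable::PrecisionStyle precision) {
      const bool parallel = false;
      // The boundary is the explicit argument, no longer cl->gen_boundary().
      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision, gen_boundary, parallel);
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
      clear_cl.do_MemRegion(mr);
    }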