@@ -150,44 +150,44 @@
 }


 inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
   jbyte entry_val = *entry;
   assert(entry_val != CardTableRS::clean_card_val(),
          "We shouldn't be looking at clean cards, and this should "
          "be the only place they get cleaned.");
   assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
          "This should not be possible in the sequential case.");
   *entry = CardTableRS::clean_card_val();
   return true;
 }

 ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
   DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) :
     _dirty_card_closure(dirty_card_closure), _ct(ct) {
   // Cannot yet substitute active_workers for n_par_threads
   // in the case where parallelism is being turned off by
   // setting n_par_threads to 0.
-  _is_par = (SharedHeap::heap()->n_par_threads() > 0);
+  _is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
   assert(!_is_par ||
-         (SharedHeap::heap()->n_par_threads() ==
-          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
+         (GenCollectedHeap::heap()->n_par_threads() ==
+          GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
 }

 bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
   return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
 }

 void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
   assert(mr.word_size() > 0, "Error");
   assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
   // mr.end() may not necessarily be card aligned.
   jbyte* cur_entry = _ct->byte_for(mr.last());
   const jbyte* limit = _ct->byte_for(mr.start());
   HeapWord* end_of_non_clean = mr.end();
   HeapWord* start_of_non_clean = end_of_non_clean;
   while (cur_entry >= limit) {
     HeapWord* cur_hw = _ct->addr_for(cur_entry);
     if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
       // Continue the dirty range by opening the
       // dirty window one card to the left.
       start_of_non_clean = cur_hw;
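For orientation, here is a minimal, self-contained sketch (not HotSpot code) of the card-table arithmetic behind _ct->byte_for() and _ct->addr_for(), and of the right-to-left scan with which do_MemRegion() grows its non-clean window. It assumes HotSpot's conventional 512-byte cards (a card shift of 9); the toy heap, byte_map, and helpers below are illustrative stand-ins, and it deliberately omits the part of do_MemRegion() that hands each completed window to _dirty_card_closure.

// Sketch only: card-table address arithmetic and a backward card scan.
// Assumption: 512-byte cards (card shift of 9), as is conventional in HotSpot.
#include <cstdint>
#include <cstdio>
#include <cstring>

static const int    CARD_SHIFT = 9;              // 2^9 = 512-byte cards
static const size_t HEAP_BYTES = 1u << 16;       // toy 64 KiB "heap"
static const size_t NUM_CARDS  = HEAP_BYTES >> CARD_SHIFT;

static uint8_t heap[HEAP_BYTES];
static uint8_t byte_map[NUM_CARDS];              // one table entry per card

static const uint8_t clean_card = 0xff;          // sentinel values only
static const uint8_t dirty_card = 0x00;

// Card entry covering a heap address: scale the offset down by the shift.
static uint8_t* byte_for(const uint8_t* addr) {
  return &byte_map[(size_t)(addr - heap) >> CARD_SHIFT];
}

// First heap address covered by a card entry: scale the index back up.
static uint8_t* addr_for(const uint8_t* entry) {
  return &heap[(size_t)(entry - byte_map) << CARD_SHIFT];
}

int main() {
  memset(byte_map, clean_card, sizeof(byte_map));
  *byte_for(heap + 1000) = dirty_card;           // card 1 (bytes 512..1023)
  *byte_for(heap + 1200) = dirty_card;           // card 2 (bytes 1024..1535)

  // Scan the cards for [heap, heap + 2048) from the last card down to the
  // first, opening the non-clean window one card to the left on each hit,
  // as do_MemRegion() does. (The real code also dispatches
  // _dirty_card_closure and restarts the window at each clean card;
  // that part is elided here.)
  uint8_t* const limit = byte_for(heap);         // card for mr.start()
  uint8_t* start_of_non_clean = heap + 2048;     // mr.end()
  for (uint8_t* cur = byte_for(heap + 2048 - 1); cur >= limit; cur--) {
    if (*cur != clean_card) {
      *cur = clean_card;                         // cf. clear_card_serial()
      start_of_non_clean = addr_for(cur);
    }
  }
  // Prints 512: the window opened back to the card containing offset 1000.
  printf("non-clean window starts at offset %zu\n",
         (size_t)(start_of_non_clean - heap));
  return 0;
}

Scanning from the last card toward the first keeps the bookkeeping trivial: each newly cleared card simply becomes the new start of the window, matching the "opening the dirty window one card to the left" comment in the excerpt above.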