// Per-region flag values for _in_dirty_region_buffer.
typedef jbyte IsDirtyRegionState;
static const IsDirtyRegionState Clean = 0;
static const IsDirtyRegionState Dirty = 1;
// Holds a flag for every region whether it is in the _dirty_region_buffer already
// to avoid duplicates. Uses jbyte since there are no atomic instructions for bools.
IsDirtyRegionState* _in_dirty_region_buffer;
// Number of entries currently recorded in _dirty_region_buffer.
size_t _cur_dirty_region;
125
126 // Creates a snapshot of the current _top values at the start of collection to
127 // filter out card marks that we do not want to scan.
128 class G1ResetScanTopClosure : public HeapRegionClosure {
129 private:
130 HeapWord** _scan_top;
131 public:
132 G1ResetScanTopClosure(HeapWord** scan_top) : _scan_top(scan_top) { }
133
134 virtual bool do_heap_region(HeapRegion* r) {
135 uint hrm_index = r->hrm_index();
136 if (!r->in_collection_set() && r->is_old_or_humongous_or_archive()) {
137 _scan_top[hrm_index] = r->top();
138 } else {
139 _scan_top[hrm_index] = r->bottom();
140 }
141 return false;
142 }
143 };
144
// For each region, contains the maximum top() value to be used during this garbage
// collection. Subsumes common checks like filtering out everything but old and
// humongous regions outside the collection set.
// This is valid because we are not interested in scanning stray remembered set
// entries from free or archive regions.
// Snapshot is (re-)taken by G1ResetScanTopClosure in reset().
HeapWord** _scan_top;
151 public:
152 G1RemSetScanState() :
153 _max_regions(0),
154 _iter_states(NULL),
155 _iter_claims(NULL),
156 _dirty_region_buffer(NULL),
157 _in_dirty_region_buffer(NULL),
158 _cur_dirty_region(0),
159 _scan_top(NULL) {
174 }
175 if (_scan_top != NULL) {
176 FREE_C_HEAP_ARRAY(HeapWord*, _scan_top);
177 }
178 }
179
// One-time allocation of the per-region bookkeeping arrays. Must be called
// exactly once, before the first reset(); max_regions is the maximum number
// of regions the heap can ever contain.
void initialize(uint max_regions) {
  assert(_iter_states == NULL, "Must not be initialized twice");
  assert(_iter_claims == NULL, "Must not be initialized twice");
  _max_regions = max_regions;
  _iter_states = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
  _iter_claims = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
  _dirty_region_buffer = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
  _in_dirty_region_buffer = NEW_C_HEAP_ARRAY(IsDirtyRegionState, max_regions, mtGC);
  _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC);
}
190
// Prepares the scan state for the next garbage collection: all region
// iteration states become Unclaimed, the per-region card claim counters and
// the dirty-region bookkeeping are cleared, and the _scan_top snapshot is
// re-taken from the current heap.
void reset() {
  for (uint i = 0; i < _max_regions; i++) {
    _iter_states[i] = Unclaimed;
  }

  // Snapshot top() (or bottom() for regions we must not scan) per region.
  G1ResetScanTopClosure cl(_scan_top);
  G1CollectedHeap::heap()->heap_region_iterate(&cl);

  memset((void*)_iter_claims, 0, _max_regions * sizeof(size_t));
  // Clean == 0, so memset is a valid bulk-initialization here.
  memset(_in_dirty_region_buffer, Clean, _max_regions * sizeof(IsDirtyRegionState));
  _cur_dirty_region = 0;
}
203
204 // Attempt to claim the remembered set of the region for iteration. Returns true
205 // if this call caused the transition from Unclaimed to Claimed.
206 inline bool claim_iter(uint region) {
207 assert(region < _max_regions, "Tried to access invalid region %u", region);
208 if (_iter_states[region] != Unclaimed) {
209 return false;
210 }
211 G1RemsetIterState res = Atomic::cmpxchg(Claimed, &_iter_states[region], Unclaimed);
212 return (res == Unclaimed);
213 }
333
// Scans all objects intersecting the memory range mr of a single card that
// lies in the region with the given index, and updates the scanned-cards
// statistic.
void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
  HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
  _scan_objs_on_card_cl->set_region(card_region);
  card_region->oops_on_card_seq_iterate_careful<true>(mr, _scan_objs_on_card_cl);
  // NOTE(review): presumably drains part of the closure's task queue to bound
  // its growth while scanning — confirm against the closure's definition.
  _scan_objs_on_card_cl->trim_queue_partially();
  _cards_scanned++;
}
341
// Scans the remembered set (the set of cards pointing into region r) of a
// single collection set region. Multiple workers cooperate on one region by
// claiming cards in blocks of G1RSetScanBlockSize from a shared counter.
void G1ScanRSForRegionClosure::scan_rem_set_roots(HeapRegion* r) {
  EventGCPhaseParallel event;
  uint const region_idx = r->hrm_index();

  if (_scan_state->claim_iter(region_idx)) {
    // If we ever free the collection set concurrently, we should also
    // clear the card table concurrently therefore we won't need to
    // add regions of the collection set to the dirty cards region.
    _scan_state->add_dirty_region(region_idx);
  }

  // We claim cards in blocks so as to reduce the contention.
  size_t const block_size = G1RSetScanBlockSize;

  HeapRegionRemSetIterator iter(r->rem_set());
  size_t card_index;

  size_t claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
  for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
    if (current_card >= claimed_card_block + block_size) {
      // Walked past the end of our claimed block; claim the next one.
      claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
    }
    if (current_card < claimed_card_block) {
      // This card belongs to a block claimed by some other worker.
      _cards_skipped++;
      continue;
    }
    _cards_claimed++;

    // If the card is dirty, then G1 will scan it during Update RS.
    if (_ct->is_card_claimed(card_index) || _ct->is_card_dirty(card_index)) {
      continue;
    }

    HeapWord* const card_start = _g1h->bot()->address_for_index(card_index);
    uint const region_idx_for_card = _g1h->addr_to_region(card_start);

    assert(_g1h->region_at(region_idx_for_card)->is_in_reserved(card_start),
           "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index());
    // Cards at or above the scan snapshot (taken at GC start) must not be
    // scanned; see the G1ResetScanTopClosure / _scan_top documentation.
    HeapWord* const top = _scan_state->scan_top(region_idx_for_card);
    if (card_start >= top) {
      continue;
    }

    // We claim lazily (so races are possible but they're benign), which reduces the
    // number of duplicate scans (the rsets of the regions in the cset can intersect).
    // Claim the card after checking bounds above: the remembered set may contain
    // random cards into current survivor, and we would then have an incorrectly
    // claimed card in survivor space. Card table clear does not reset the card table
    // of survivor space regions.
    claim_card(card_index, region_idx_for_card);

    // Scan at most one card's worth of words, clipped to the snapshot top.
    MemRegion const mr(card_start, MIN2(card_start + BOTConstants::N_words, top));

    scan_card(mr, region_idx_for_card);
  }
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ScanRS));
}
399
// Applies the per-thread weak code blob closure to the strong code roots
// recorded for region r, reporting the work as a CodeRoots phase event.
void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
  EventGCPhaseParallel event;
  r->strong_code_roots_do(_pss->closures()->weak_codeblobs());
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::CodeRoots));
}
528 _scan_state->clear_card_table(_g1h->workers());
529 phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
530 }
531
532 inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
533 #ifdef ASSERT
534 G1CollectedHeap* g1h = G1CollectedHeap::heap();
535 assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
536 "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
537 p2i(card_ptr),
538 ct->index_for(ct->addr_for(card_ptr)),
539 p2i(ct->addr_for(card_ptr)),
540 g1h->addr_to_region(ct->addr_for(card_ptr)));
541 #endif
542 }
543
544 void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
545 uint worker_i) {
546 assert(!_g1h->is_gc_active(), "Only call concurrently");
547
548 check_card_ptr(card_ptr, _ct);
549
550 // If the card is no longer dirty, nothing to do.
551 if (*card_ptr != G1CardTable::dirty_card_val()) {
552 return;
553 }
554
555 // Construct the region representing the card.
556 HeapWord* start = _ct->addr_for(card_ptr);
557 // And find the region containing it.
558 HeapRegion* r = _g1h->heap_region_containing(start);
559
560 // This check is needed for some uncommon cases where we should
561 // ignore the card.
562 //
563 // The region could be young. Cards for young regions are
564 // distinctly marked (set to g1_young_gen), so the post-barrier will
565 // filter them out. However, that marking is performed
566 // concurrently. A write to a young object could occur before the
567 // card has been marked young, slipping past the filter.
568 //
569 // The card could be stale, because the region has been freed since
570 // the card was recorded. In this case the region type could be
571 // anything. If (still) free or (reallocated) young, just ignore
572 // it. If (reallocated) old or humongous, the later card trimming
573 // and additional checks in iteration may detect staleness. At
574 // worst, we end up processing a stale card unnecessarily.
575 //
576 // In the normal (non-stale) case, the synchronization between the
577 // enqueueing of the card and processing it here will have ensured
578 // we see the up-to-date region type here.
579 if (!r->is_old_or_humongous_or_archive()) {
662 if (!card_processed) {
663 // The card might have gotten re-dirtied and re-enqueued while we
664 // worked. (In fact, it's pretty likely.)
665 if (*card_ptr != G1CardTable::dirty_card_val()) {
666 *card_ptr = G1CardTable::dirty_card_val();
667 MutexLockerEx x(Shared_DirtyCardQ_lock,
668 Mutex::_no_safepoint_check_flag);
669 DirtyCardQueue* sdcq =
670 G1BarrierSet::dirty_card_queue_set().shared_dirty_card_queue();
671 sdcq->enqueue(card_ptr);
672 }
673 } else {
674 _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
675 }
676 }
677
// Refines a single dirty card during GC as part of Update RS: scans the
// objects on the card with the given closure. Returns true if the card was
// actually processed, false if it could be skipped (already clean/claimed or
// above the region's scan limit).
// NOTE(review): *card_ptr is read before any check that the corresponding
// region is committed — verify callers never enqueue cards for uncommitted
// regions.
bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
                                     G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
  assert(_g1h->is_gc_active(), "Only call during GC");

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do. This covers cards that were already
  // scanned as parts of the remembered sets.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return false;
  }

  // We claim lazily (so races are possible but they're benign), which reduces the
  // number of potential duplicate scans (multiple threads may enqueue the same card twice).
  *card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();

  // Construct the region representing the card.
  HeapWord* card_start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  uint const card_region_idx = _g1h->addr_to_region(card_start);

  _scan_state->add_dirty_region(card_region_idx);
  HeapWord* scan_limit = _scan_state->scan_top(card_region_idx);
  if (scan_limit <= card_start) {
    // If the card starts above the area in the region containing objects to scan, skip it.
    return false;
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
  assert(!dirty_region.is_empty(), "sanity");

  HeapRegion* const card_region = _g1h->region_at(card_region_idx);
  update_rs_cl->set_region(card_region);
  bool card_processed = card_region->oops_on_card_seq_iterate_careful<true>(dirty_region, update_rs_cl);
  assert(card_processed, "must be");
  return true;
}
718
719 void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
720 if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
|
// Per-region flag values for _in_dirty_region_buffer.
typedef jbyte IsDirtyRegionState;
static const IsDirtyRegionState Clean = 0;
static const IsDirtyRegionState Dirty = 1;
// Holds a flag for every region whether it is in the _dirty_region_buffer already
// to avoid duplicates. Uses jbyte since there are no atomic instructions for bools.
IsDirtyRegionState* _in_dirty_region_buffer;
// Number of entries currently recorded in _dirty_region_buffer.
size_t _cur_dirty_region;
125
126 // Creates a snapshot of the current _top values at the start of collection to
127 // filter out card marks that we do not want to scan.
128 class G1ResetScanTopClosure : public HeapRegionClosure {
129 private:
130 HeapWord** _scan_top;
131 public:
132 G1ResetScanTopClosure(HeapWord** scan_top) : _scan_top(scan_top) { }
133
134 virtual bool do_heap_region(HeapRegion* r) {
135 uint hrm_index = r->hrm_index();
136 if (!r->in_collection_set() && r->is_old_or_humongous_or_archive()) {
137 _scan_top[hrm_index] = r->top();
138 }
139 return false;
140 }
141 };
142
// For each region, contains the maximum top() value to be used during this garbage
// collection. Subsumes common checks like filtering out everything but old and
// humongous regions outside the collection set.
// This is valid because we are not interested in scanning stray remembered set
// entries from free or archive regions.
// Entries not filled in by G1ResetScanTopClosure stay NULL (set in reset()).
HeapWord** _scan_top;
149 public:
150 G1RemSetScanState() :
151 _max_regions(0),
152 _iter_states(NULL),
153 _iter_claims(NULL),
154 _dirty_region_buffer(NULL),
155 _in_dirty_region_buffer(NULL),
156 _cur_dirty_region(0),
157 _scan_top(NULL) {
172 }
173 if (_scan_top != NULL) {
174 FREE_C_HEAP_ARRAY(HeapWord*, _scan_top);
175 }
176 }
177
178 void initialize(uint max_regions) {
179 assert(_iter_states == NULL, "Must not be initialized twice");
180 assert(_iter_claims == NULL, "Must not be initialized twice");
181 _max_regions = max_regions;
182 _iter_states = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
183 _iter_claims = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
184 _dirty_region_buffer = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
185 _in_dirty_region_buffer = NEW_C_HEAP_ARRAY(IsDirtyRegionState, max_regions, mtGC);
186 _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC);
187 }
188
// Prepares the scan state for the next garbage collection: all region
// iteration states become Unclaimed, the per-region card claim counters and
// the dirty-region bookkeeping are cleared, and the _scan_top snapshot is
// re-taken from the current heap.
void reset() {
  for (uint i = 0; i < _max_regions; i++) {
    _iter_states[i] = Unclaimed;
    // NULL means "do not scan"; the closure below only fills in entries for
    // regions of interest, so entries for all other regions (including ones
    // not iterated, e.g. uncommitted ones) stay NULL.
    _scan_top[i] = NULL;
  }

  G1ResetScanTopClosure cl(_scan_top);
  G1CollectedHeap::heap()->heap_region_iterate(&cl);

  memset((void*)_iter_claims, 0, _max_regions * sizeof(size_t));
  // Clean == 0, so memset is a valid bulk-initialization here.
  memset(_in_dirty_region_buffer, Clean, _max_regions * sizeof(IsDirtyRegionState));
  _cur_dirty_region = 0;
}
202
// Attempt to claim the remembered set of the region for iteration. Returns true
// if this call caused the transition from Unclaimed to Claimed.
inline bool claim_iter(uint region) {
  assert(region < _max_regions, "Tried to access invalid region %u", region);
  // Racy fast-path read to avoid the CAS when the region is already taken.
  if (_iter_states[region] != Unclaimed) {
    return false;
  }
  // Only the thread whose CAS observes Unclaimed wins the claim.
  G1RemsetIterState res = Atomic::cmpxchg(Claimed, &_iter_states[region], Unclaimed);
  return (res == Unclaimed);
}
332
// Scans all objects intersecting the memory range mr of a single card that
// lies in the region with the given index, and updates the scanned-cards
// statistic.
void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
  HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
  _scan_objs_on_card_cl->set_region(card_region);
  card_region->oops_on_card_seq_iterate_careful<true>(mr, _scan_objs_on_card_cl);
  // NOTE(review): presumably drains part of the closure's task queue to bound
  // its growth while scanning — confirm against the closure's definition.
  _scan_objs_on_card_cl->trim_queue_partially();
  _cards_scanned++;
}
340
// Scans the remembered set (the set of cards pointing into region r) of a
// single collection set region. Multiple workers cooperate on one region by
// claiming cards in blocks of G1RSetScanBlockSize from a shared counter.
void G1ScanRSForRegionClosure::scan_rem_set_roots(HeapRegion* r) {
  EventGCPhaseParallel event;
  uint const region_idx = r->hrm_index();

  if (_scan_state->claim_iter(region_idx)) {
    // If we ever free the collection set concurrently, we should also
    // clear the card table concurrently therefore we won't need to
    // add regions of the collection set to the dirty cards region.
    _scan_state->add_dirty_region(region_idx);
  }

  // Nothing to scan for this region: avoid setting up the iterator at all.
  if (r->rem_set()->cardset_is_empty()) {
    return;
  }

  // We claim cards in blocks so as to reduce the contention.
  size_t const block_size = G1RSetScanBlockSize;

  HeapRegionRemSetIterator iter(r->rem_set());
  size_t card_index;

  size_t claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
  for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
    if (current_card >= claimed_card_block + block_size) {
      // Walked past the end of our claimed block; claim the next one.
      claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
    }
    if (current_card < claimed_card_block) {
      // This card belongs to a block claimed by some other worker.
      _cards_skipped++;
      continue;
    }
    _cards_claimed++;

    // NOTE(review): the _raw variant presumably skips committed-range checks;
    // the remembered set may contain stale cards into uncommitted regions,
    // which the NULL/scan_top filter below rejects — confirm.
    HeapWord* const card_start = _g1h->bot()->address_for_index_raw(card_index);
    uint const region_idx_for_card = _g1h->addr_to_region(card_start);

#ifdef ASSERT
    HeapRegion* hr = _g1h->region_at_or_null(region_idx_for_card);
    assert(hr == NULL || hr->is_in_reserved(card_start),
           "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index());
#endif
    // Check the scan limit before touching the card table: cards at or above
    // it (including regions with a NULL limit) must not be scanned.
    HeapWord* const top = _scan_state->scan_top(region_idx_for_card);
    if (card_start >= top) {
      continue;
    }

    // If the card is dirty, then G1 will scan it during Update RS.
    if (_ct->is_card_claimed(card_index) || _ct->is_card_dirty(card_index)) {
      continue;
    }

    // We claim lazily (so races are possible but they're benign), which reduces the
    // number of duplicate scans (the rsets of the regions in the cset can intersect).
    // Claim the card after checking bounds above: the remembered set may contain
    // random cards into current survivor, and we would then have an incorrectly
    // claimed card in survivor space. Card table clear does not reset the card table
    // of survivor space regions.
    claim_card(card_index, region_idx_for_card);

    // Scan at most one card's worth of words, clipped to the snapshot top.
    MemRegion const mr(card_start, MIN2(card_start + BOTConstants::N_words, top));

    scan_card(mr, region_idx_for_card);
  }
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ScanRS));
}
405
// Applies the per-thread weak code blob closure to the strong code roots
// recorded for region r, reporting the work as a CodeRoots phase event.
void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
  EventGCPhaseParallel event;
  r->strong_code_roots_do(_pss->closures()->weak_codeblobs());
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::CodeRoots));
}
534 _scan_state->clear_card_table(_g1h->workers());
535 phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
536 }
537
// Debug-only sanity check that card_ptr corresponds to an address inside the
// committed heap. Compiles to nothing in product builds.
inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
#ifdef ASSERT
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
         "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
         p2i(card_ptr),
         ct->index_for(ct->addr_for(card_ptr)),
         p2i(ct->addr_for(card_ptr)),
         g1h->addr_to_region(ct->addr_for(card_ptr)));
#endif
}
549
550 void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
551 uint worker_i) {
552 assert(!_g1h->is_gc_active(), "Only call concurrently");
553
554 // Construct the region representing the card.
555 HeapWord* start = _ct->addr_for(card_ptr);
556 // And find the region containing it.
557 HeapRegion* r = _g1h->heap_region_containing_or_null(start);
558
559 // If this is a (stale) card into an uncommitted region, exit.
560 if (r == NULL) {
561 return;
562 }
563
564 check_card_ptr(card_ptr, _ct);
565
566 // If the card is no longer dirty, nothing to do.
567 if (*card_ptr != G1CardTable::dirty_card_val()) {
568 return;
569 }
570
571 // This check is needed for some uncommon cases where we should
572 // ignore the card.
573 //
574 // The region could be young. Cards for young regions are
575 // distinctly marked (set to g1_young_gen), so the post-barrier will
576 // filter them out. However, that marking is performed
577 // concurrently. A write to a young object could occur before the
578 // card has been marked young, slipping past the filter.
579 //
580 // The card could be stale, because the region has been freed since
581 // the card was recorded. In this case the region type could be
582 // anything. If (still) free or (reallocated) young, just ignore
583 // it. If (reallocated) old or humongous, the later card trimming
584 // and additional checks in iteration may detect staleness. At
585 // worst, we end up processing a stale card unnecessarily.
586 //
587 // In the normal (non-stale) case, the synchronization between the
588 // enqueueing of the card and processing it here will have ensured
589 // we see the up-to-date region type here.
590 if (!r->is_old_or_humongous_or_archive()) {
673 if (!card_processed) {
674 // The card might have gotten re-dirtied and re-enqueued while we
675 // worked. (In fact, it's pretty likely.)
676 if (*card_ptr != G1CardTable::dirty_card_val()) {
677 *card_ptr = G1CardTable::dirty_card_val();
678 MutexLockerEx x(Shared_DirtyCardQ_lock,
679 Mutex::_no_safepoint_check_flag);
680 DirtyCardQueue* sdcq =
681 G1BarrierSet::dirty_card_queue_set().shared_dirty_card_queue();
682 sdcq->enqueue(card_ptr);
683 }
684 } else {
685 _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
686 }
687 }
688
// Refines a single dirty card during GC as part of Update RS: scans the
// objects on the card with the given closure. Returns true if the card was
// actually processed, false if it could be skipped (uncommitted region,
// already clean/claimed, or above the region's scan limit).
bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
                                     G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
  assert(_g1h->is_gc_active(), "Only call during GC");

  // Construct the region representing the card.
  HeapWord* card_start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  uint const card_region_idx = _g1h->addr_to_region(card_start);

  // A NULL scan limit (set in reset()) marks a region that must not be
  // scanned; this check must precede any access to the card table entry.
  HeapWord* scan_limit = _scan_state->scan_top(card_region_idx);
  if (scan_limit == NULL) {
    // This is a card into an uncommitted region. We need to bail out early as we
    // should not access the corresponding card table entry.
    return false;
  }

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do. This covers cards that were already
  // scanned as parts of the remembered sets.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return false;
  }

  // We claim lazily (so races are possible but they're benign), which reduces the
  // number of potential duplicate scans (multiple threads may enqueue the same card twice).
  *card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();

  _scan_state->add_dirty_region(card_region_idx);
  if (scan_limit <= card_start) {
    // If the card starts above the area in the region containing objects to scan, skip it.
    return false;
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
  assert(!dirty_region.is_empty(), "sanity");

  HeapRegion* const card_region = _g1h->region_at(card_region_idx);
  update_rs_cl->set_region(card_region);
  bool card_processed = card_region->oops_on_card_seq_iterate_careful<true>(dirty_region, update_rs_cl);
  assert(card_processed, "must be");
  return true;
}
735
736 void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
737 if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
|