178 _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
179 }
180
181 bool doHeapRegion(HeapRegion* r) {
182 assert(r->in_collection_set(), "should only be called on elements of CS.");
183 HeapRegionRemSet* hrrs = r->rem_set();
184 if (hrrs->iter_is_complete()) return false; // All done.
185 if (!_try_claimed && !hrrs->claim_iter()) return false;
186 // If we ever free the collection set concurrently, we should also
187 // clear the card table concurrently therefore we won't need to
188 // add regions of the collection set to the dirty cards region.
189 _g1h->push_dirty_cards_region(r);
190 // If we didn't return above, then
191 // _try_claimed || r->claim_iter()
192 // is true: either we're supposed to work on claimed-but-not-complete
193 // regions, or we successfully claimed the region.
194
195 HeapRegionRemSetIterator iter(hrrs);
196 size_t card_index;
197
198 // We claim cards in block so as to reduce the contention. The block size is determined by
199 // the G1RSetScanBlockSize parameter.
200 size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
201 for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
202 if (current_card >= jump_to_card + _block_size) {
203 jump_to_card = hrrs->iter_claimed_next(_block_size);
204 }
205 if (current_card < jump_to_card) continue;
206 HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
207 #if 0
208 gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
209 card_start, card_start + CardTableModRefBS::card_size_in_words);
210 #endif
211
212 HeapRegion* card_region = _g1h->heap_region_containing(card_start);
213 assert(card_region != NULL, "Yielding cards not in the heap?");
214 _cards++;
215
216 if (!card_region->is_on_dirty_cards_region_list()) {
217 _g1h->push_dirty_cards_region(card_region);
218 }
570 return false; // Not in the G1 heap (might be in perm, for example.)
571 }
572
573 // Why do we have to check here whether a card is on a young region,
574 // given that we dirty young regions and, as a result, the
575 // post-barrier is supposed to filter them out and never to enqueue
576 // them? When we allocate a new region as the "allocation region" we
577 // actually dirty its cards after we release the lock, since card
578 // dirtying while holding the lock was a performance bottleneck. So,
579 // as a result, it is possible for other threads to actually
580 // allocate objects in the region (after they acquire the lock)
581 // before all the cards on the region are dirtied. This is unlikely,
582 // and it doesn't happen often, but it can happen. So, the extra
583 // check below filters out those cards.
584 if (r->is_young()) {
585 return false;
586 }
587
588 // While we are processing RSet buffers during the collection, we
589 // actually don't want to scan any cards on the collection set,
590 // since we don't want to update remembered sets with entries that
591 // point into the collection set, given that live objects from the
592 // collection set are about to move and such entries will be stale
593 // very soon. This change also deals with a reliability issue which
594 // involves scanning a card in the collection set and coming across
595 // an array that was being chunked and looking malformed. Note,
596 // however, that if evacuation fails, we have to scan any objects
597 // that were not moved and create any missing entries.
598 if (r->in_collection_set()) {
599 return false;
600 }
601
602 // The result from the hot card cache insert call is either:
603 // * pointer to the current card
604 // (implying that the current card is not 'hot'),
605 // * null
606 // (meaning we had inserted the card ptr into the "hot" card cache,
607 // which had some headroom),
608 // * a pointer to a "hot" card that was evicted from the "hot" cache.
609 //
610
|
178 _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
179 }
180
181 bool doHeapRegion(HeapRegion* r) {
182 assert(r->in_collection_set(), "should only be called on elements of CS.");
183 HeapRegionRemSet* hrrs = r->rem_set();
184 if (hrrs->iter_is_complete()) return false; // All done.
185 if (!_try_claimed && !hrrs->claim_iter()) return false;
186 // If we ever free the collection set concurrently, we should also
187 // clear the card table concurrently therefore we won't need to
188 // add regions of the collection set to the dirty cards region.
189 _g1h->push_dirty_cards_region(r);
190 // If we didn't return above, then
191 // _try_claimed || r->claim_iter()
192 // is true: either we're supposed to work on claimed-but-not-complete
193 // regions, or we successfully claimed the region.
194
195 HeapRegionRemSetIterator iter(hrrs);
196 size_t card_index;
197
198 // We claim cards in block so as to reduce the contention. The block size is determined by
199 // the G1RSetScanBlockSize parameter.
200 size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
201 for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
202 if (current_card >= jump_to_card + _block_size) {
203 jump_to_card = hrrs->iter_claimed_next(_block_size);
204 }
205 if (current_card < jump_to_card) continue;
206 HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
207 #if 0
208 gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
209 card_start, card_start + CardTableModRefBS::card_size_in_words);
210 #endif
211
212 HeapRegion* card_region = _g1h->heap_region_containing(card_start);
213 assert(card_region != NULL, "Yielding cards not in the heap?");
214 _cards++;
215
216 if (!card_region->is_on_dirty_cards_region_list()) {
217 _g1h->push_dirty_cards_region(card_region);
218 }
570 return false; // Not in the G1 heap (might be in perm, for example.)
571 }
572
573 // Why do we have to check here whether a card is on a young region,
574 // given that we dirty young regions and, as a result, the
575 // post-barrier is supposed to filter them out and never to enqueue
576 // them? When we allocate a new region as the "allocation region" we
577 // actually dirty its cards after we release the lock, since card
578 // dirtying while holding the lock was a performance bottleneck. So,
579 // as a result, it is possible for other threads to actually
580 // allocate objects in the region (after they acquire the lock)
581 // before all the cards on the region are dirtied. This is unlikely,
582 // and it doesn't happen often, but it can happen. So, the extra
583 // check below filters out those cards.
584 if (r->is_young()) {
585 return false;
586 }
587
588 // While we are processing RSet buffers during the collection, we
589 // actually don't want to scan any cards on the collection set,
590 // since we don't want to update remembered sets with entries that
591 // point into the collection set, given that live objects from the
592 // collection set are about to move and such entries will be stale
593 // very soon. This change also deals with a reliability issue which
594 // involves scanning a card in the collection set and coming across
595 // an array that was being chunked and looking malformed. Note,
596 // however, that if evacuation fails, we have to scan any objects
597 // that were not moved and create any missing entries.
598 if (r->in_collection_set()) {
599 return false;
600 }
601
602 // The result from the hot card cache insert call is either:
603 // * pointer to the current card
604 // (implying that the current card is not 'hot'),
605 // * null
606 // (meaning we had inserted the card ptr into the "hot" card cache,
607 // which had some headroom),
608 // * a pointer to a "hot" card that was evicted from the "hot" cache.
609 //
610
|