259 }
260
261 size_t const num_chunks = align_up(_cur_dirty_region * HeapRegion::CardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size();
262 uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
263 size_t const chunk_length = G1ClearCardTableTask::chunk_size() / HeapRegion::CardsPerRegion;
264
265 // Iterate over the dirty cards region list.
266 G1ClearCardTableTask cl(G1CollectedHeap::heap(), _dirty_region_buffer, _cur_dirty_region, chunk_length);
267
268 log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " "
269 "units of work for " SIZE_FORMAT " regions.",
270 cl.name(), num_workers, num_chunks, _cur_dirty_region);
271 workers->run_task(&cl, num_workers);
272
273 #ifndef PRODUCT
274 G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
275 #endif
276 }
277 };
278
// Construct the remembered set helper for the given heap. Caches the heap,
// card table, policy (taken from the heap) and hot card cache, and creates
// the scan state used to coordinate per-region remembered set scanning.
G1RemSet::G1RemSet(G1CollectedHeap* g1,
                   G1CardTable* ct,
                   G1HotCardCache* hot_card_cache) :
  _g1(g1),
  _scan_state(new G1RemSetScanState()),
  _num_conc_refined_cards(0),
  _ct(ct),
  _g1p(_g1->g1_policy()),
  _hot_card_cache(hot_card_cache),
  _prev_period_summary() {
}
290
291 G1RemSet::~G1RemSet() {
292 if (_scan_state != NULL) {
293 delete _scan_state;
294 }
295 }
296
297 uint G1RemSet::num_par_rem_sets() {
298 return DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
299 }
300
// Initialize data structures that depend on heap geometry.
// NOTE(review): the "capacity" parameter is not used by this body — confirm
// whether it is still needed by callers or can be removed from the interface.
void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
}
305
306 G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
391 // claimed card in survivor space. Card table clear does not reset the card table
392 // of survivor space regions.
393 claim_card(card_index, region_idx_for_card);
394
395 MemRegion const mr(card_start, MIN2(card_start + BOTConstants::N_words, top));
396
397 scan_card(mr, region_idx_for_card);
398 }
399 if (_scan_state->set_iter_complete(region_idx)) {
400 // Scan the strong code root list attached to the current region
401 scan_strong_code_roots(r);
402 }
403 return false;
404 }
405
406 void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
407 CodeBlobClosure* heap_region_codeblobs,
408 uint worker_i) {
409 double rs_time_start = os::elapsedTime();
410
411 G1ScanObjsDuringScanRSClosure scan_cl(_g1, pss);
412 G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, heap_region_codeblobs, worker_i);
413 _g1->collection_set_iterate_from(&cl, worker_i);
414
415 double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
416 cl.strong_code_root_scan_time_sec();
417
418 G1GCPhaseTimes* p = _g1p->phase_times();
419
420 p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec);
421 p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
422 p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
423 p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);
424
425 p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time_sec());
426 }
427
428 // Closure used for updating rem sets. Only called during an evacuation pause.
429 class G1RefineCardClosure: public CardTableEntryClosure {
430 G1RemSet* _g1rs;
431 G1ScanObjsDuringUpdateRSClosure* _update_rs_cl;
432
433 size_t _cards_scanned;
442 // contain references that point into the collection set
443 // is during RSet updating within an evacuation pause.
444 // In this case worker_i should be the id of a GC worker thread.
445 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
446
447 bool card_scanned = _g1rs->refine_card_during_gc(card_ptr, _update_rs_cl);
448
449 if (card_scanned) {
450 _cards_scanned++;
451 } else {
452 _cards_skipped++;
453 }
454 return true;
455 }
456
457 size_t cards_scanned() const { return _cards_scanned; }
458 size_t cards_skipped() const { return _cards_skipped; }
459 };
460
461 void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
462 G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1, pss, worker_i);
463 G1RefineCardClosure refine_card_cl(_g1, &update_rs_cl);
464
465 G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
466 if (G1HotCardCache::default_use_cache()) {
467 // Apply the closure to the entries of the hot card cache.
468 G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
469 _g1->iterate_hcc_closure(&refine_card_cl, worker_i);
470 }
471 // Apply the closure to all remaining log entries.
472 _g1->iterate_dirty_card_closure(&refine_card_cl, worker_i);
473
474 G1GCPhaseTimes* p = _g1p->phase_times();
475 p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_scanned(), G1GCPhaseTimes::UpdateRSScannedCards);
476 p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_skipped(), G1GCPhaseTimes::UpdateRSSkippedCards);
477 }
478
// Delegate cleanup of the heap region remembered set internals.
void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}
482
483 void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss,
484 CodeBlobClosure* heap_region_codeblobs,
485 uint worker_i) {
486 update_rem_set(pss, worker_i);
487 scan_rem_set(pss, heap_region_codeblobs, worker_i);;
488 }
489
490 void G1RemSet::prepare_for_oops_into_collection_set_do() {
491 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
492 dcqs.concatenate_logs();
493
494 _scan_state->reset();
495 }
496
497 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
498 G1GCPhaseTimes* phase_times = _g1->g1_policy()->phase_times();
499
500 // Set all cards back to clean.
501 double start = os::elapsedTime();
502 _scan_state->clear_card_table(_g1->workers());
503 phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
504 }
505
506 inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
507 #ifdef ASSERT
508 G1CollectedHeap* g1h = G1CollectedHeap::heap();
509 assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
510 "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
511 p2i(card_ptr),
512 ct->index_for(ct->addr_for(card_ptr)),
513 p2i(ct->addr_for(card_ptr)),
514 g1h->addr_to_region(ct->addr_for(card_ptr)));
515 #endif
516 }
517
518 void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
519 uint worker_i) {
520 assert(!_g1->is_gc_active(), "Only call concurrently");
521
522 check_card_ptr(card_ptr, _ct);
523
524 // If the card is no longer dirty, nothing to do.
525 if (*card_ptr != G1CardTable::dirty_card_val()) {
526 return;
527 }
528
529 // Construct the region representing the card.
530 HeapWord* start = _ct->addr_for(card_ptr);
531 // And find the region containing it.
532 HeapRegion* r = _g1->heap_region_containing(start);
533
534 // This check is needed for some uncommon cases where we should
535 // ignore the card.
536 //
537 // The region could be young. Cards for young regions are
538 // distinctly marked (set to g1_young_gen), so the post-barrier will
539 // filter them out. However, that marking is performed
540 // concurrently. A write to a young object could occur before the
541 // card has been marked young, slipping past the filter.
542 //
543 // The card could be stale, because the region has been freed since
544 // the card was recorded. In this case the region type could be
545 // anything. If (still) free or (reallocated) young, just ignore
546 // it. If (reallocated) old or humongous, the later card trimming
547 // and additional checks in iteration may detect staleness. At
548 // worst, we end up processing a stale card unnecessarily.
549 //
550 // In the normal (non-stale) case, the synchronization between the
551 // enqueueing of the card and processing it here will have ensured
552 // we see the up-to-date region type here.
557 // The result from the hot card cache insert call is either:
558 // * pointer to the current card
559 // (implying that the current card is not 'hot'),
560 // * null
561 // (meaning we had inserted the card ptr into the "hot" card cache,
562 // which had some headroom),
563 // * a pointer to a "hot" card that was evicted from the "hot" cache.
564 //
565
566 if (_hot_card_cache->use_cache()) {
567 assert(!SafepointSynchronize::is_at_safepoint(), "sanity");
568
569 const jbyte* orig_card_ptr = card_ptr;
570 card_ptr = _hot_card_cache->insert(card_ptr);
571 if (card_ptr == NULL) {
572 // There was no eviction. Nothing to do.
573 return;
574 } else if (card_ptr != orig_card_ptr) {
575 // Original card was inserted and an old card was evicted.
576 start = _ct->addr_for(card_ptr);
577 r = _g1->heap_region_containing(start);
578
579 // Check whether the region formerly in the cache should be
580 // ignored, as discussed earlier for the original card. The
581 // region could have been freed while in the cache.
582 if (!r->is_old_or_humongous()) {
583 return;
584 }
585 } // Else we still have the original card.
586 }
587
588 // Trim the region designated by the card to what's been allocated
589 // in the region. The card could be stale, or the card could cover
590 // (part of) an object at the end of the allocated space and extend
591 // beyond the end of allocation.
592
593 // Non-humongous objects are only allocated in the old-gen during
594 // GC, so if region is old then top is stable. Humongous object
595 // allocation sets top last; if top has not yet been set, this is
596 // a stale card and we'll end up with an empty intersection. If
597 // this is not a stale card, the synchronization between the
606
607 // Okay to clean and process the card now. There are still some
608 // stale card cases that may be detected by iteration and dealt with
609 // as iteration failure.
610 *const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();
611
612 // This fence serves two purposes. First, the card must be cleaned
613 // before processing the contents. Second, we can't proceed with
614 // processing until after the read of top, for synchronization with
615 // possibly concurrent humongous object allocation. It's okay that
616 // reading top and reading type were racy wrto each other. We need
617 // both set, in any order, to proceed.
618 OrderAccess::fence();
619
620 // Don't use addr_for(card_ptr + 1) which can ask for
621 // a card beyond the heap.
622 HeapWord* end = start + G1CardTable::card_size_in_words;
623 MemRegion dirty_region(start, MIN2(scan_limit, end));
624 assert(!dirty_region.is_empty(), "sanity");
625
626 G1ConcurrentRefineOopClosure conc_refine_cl(_g1, worker_i);
627
628 bool card_processed =
629 r->oops_on_card_seq_iterate_careful<false>(dirty_region, &conc_refine_cl);
630
631 // If unable to process the card then we encountered an unparsable
632 // part of the heap (e.g. a partially allocated object) while
633 // processing a stale card. Despite the card being stale, redirty
634 // and re-enqueue, because we've already cleaned the card. Without
635 // this we could incorrectly discard a non-stale card.
636 if (!card_processed) {
637 // The card might have gotten re-dirtied and re-enqueued while we
638 // worked. (In fact, it's pretty likely.)
639 if (*card_ptr != G1CardTable::dirty_card_val()) {
640 *card_ptr = G1CardTable::dirty_card_val();
641 MutexLockerEx x(Shared_DirtyCardQ_lock,
642 Mutex::_no_safepoint_check_flag);
643 DirtyCardQueue* sdcq =
644 JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
645 sdcq->enqueue(card_ptr);
646 }
647 } else {
648 _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
649 }
650 }
651
// Refine a single card during an evacuation pause, applying "update_rs_cl"
// to the oops in the heap range the card covers. Returns true if the card
// was scanned, false if it was skipped (already clean, or entirely above the
// region's scan limit).
bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
                                     G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
  assert(_g1->is_gc_active(), "Only call during GC");

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do. This covers cards that were already
  // scanned as parts of the remembered sets.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return false;
  }

  // We claim lazily (so races are possible but they're benign), which reduces the
  // number of potential duplicate scans (multiple threads may enqueue the same card twice).
  *card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();

  // Construct the region representing the card.
  HeapWord* card_start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  uint const card_region_idx = _g1->addr_to_region(card_start);

  // Record the region as dirty so its card table range is reset after the pause.
  _scan_state->add_dirty_region(card_region_idx);
  HeapWord* scan_limit = _scan_state->scan_top(card_region_idx);
  if (scan_limit <= card_start) {
    // If the card starts above the area in the region containing objects to scan, skip it.
    return false;
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
  // Trim the card's range to the scan limit; never scan above it.
  MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
  assert(!dirty_region.is_empty(), "sanity");

  HeapRegion* const card_region = _g1->region_at(card_region_idx);
  update_rs_cl->set_region(card_region);
  bool card_processed = card_region->oops_on_card_seq_iterate_careful<true>(dirty_region, update_rs_cl);
  assert(card_processed, "must be");
  return true;
}
692
693 void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
694 if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
695 (period_count % G1SummarizeRSetStatsPeriod == 0)) {
696
697 G1RemSetSummary current(this);
698     _prev_period_summary.subtract_from(&current);
699
700 Log(gc, remset) log;
701 log.trace("%s", header);
702 ResourceMark rm;
703 LogStream ls(log.trace());
704 _prev_period_summary.print_on(&ls);
705
706     _prev_period_summary.set(&current);
|
259 }
260
261 size_t const num_chunks = align_up(_cur_dirty_region * HeapRegion::CardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size();
262 uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
263 size_t const chunk_length = G1ClearCardTableTask::chunk_size() / HeapRegion::CardsPerRegion;
264
265 // Iterate over the dirty cards region list.
266 G1ClearCardTableTask cl(G1CollectedHeap::heap(), _dirty_region_buffer, _cur_dirty_region, chunk_length);
267
268 log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " "
269 "units of work for " SIZE_FORMAT " regions.",
270 cl.name(), num_workers, num_chunks, _cur_dirty_region);
271 workers->run_task(&cl, num_workers);
272
273 #ifndef PRODUCT
274 G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
275 #endif
276 }
277 };
278
// Construct the remembered set helper for the given heap. Caches the heap,
// card table, policy (taken from the heap) and hot card cache, and creates
// the scan state used to coordinate per-region remembered set scanning.
G1RemSet::G1RemSet(G1CollectedHeap* g1h,
                   G1CardTable* ct,
                   G1HotCardCache* hot_card_cache) :
  _g1h(g1h),
  _scan_state(new G1RemSetScanState()),
  _num_conc_refined_cards(0),
  _ct(ct),
  _g1p(_g1h->g1_policy()),
  _hot_card_cache(hot_card_cache),
  _prev_period_summary() {
}
290
291 G1RemSet::~G1RemSet() {
292 if (_scan_state != NULL) {
293 delete _scan_state;
294 }
295 }
296
297 uint G1RemSet::num_par_rem_sets() {
298 return DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
299 }
300
// Initialize data structures that depend on heap geometry.
// NOTE(review): the "capacity" parameter is not used by this body — confirm
// whether it is still needed by callers or can be removed from the interface.
void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
}
305
306 G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
391 // claimed card in survivor space. Card table clear does not reset the card table
392 // of survivor space regions.
393 claim_card(card_index, region_idx_for_card);
394
395 MemRegion const mr(card_start, MIN2(card_start + BOTConstants::N_words, top));
396
397 scan_card(mr, region_idx_for_card);
398 }
399 if (_scan_state->set_iter_complete(region_idx)) {
400 // Scan the strong code root list attached to the current region
401 scan_strong_code_roots(r);
402 }
403 return false;
404 }
405
406 void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
407 CodeBlobClosure* heap_region_codeblobs,
408 uint worker_i) {
409 double rs_time_start = os::elapsedTime();
410
411 G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
412 G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, heap_region_codeblobs, worker_i);
413 _g1h->collection_set_iterate_from(&cl, worker_i);
414
415 double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
416 cl.strong_code_root_scan_time_sec();
417
418 G1GCPhaseTimes* p = _g1p->phase_times();
419
420 p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec);
421 p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
422 p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
423 p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);
424
425 p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time_sec());
426 }
427
428 // Closure used for updating rem sets. Only called during an evacuation pause.
429 class G1RefineCardClosure: public CardTableEntryClosure {
430 G1RemSet* _g1rs;
431 G1ScanObjsDuringUpdateRSClosure* _update_rs_cl;
432
433 size_t _cards_scanned;
442 // contain references that point into the collection set
443 // is during RSet updating within an evacuation pause.
444 // In this case worker_i should be the id of a GC worker thread.
445 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
446
447 bool card_scanned = _g1rs->refine_card_during_gc(card_ptr, _update_rs_cl);
448
449 if (card_scanned) {
450 _cards_scanned++;
451 } else {
452 _cards_skipped++;
453 }
454 return true;
455 }
456
457 size_t cards_scanned() const { return _cards_scanned; }
458 size_t cards_skipped() const { return _cards_skipped; }
459 };
460
461 void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
462 G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1h, pss, worker_i);
463 G1RefineCardClosure refine_card_cl(_g1h, &update_rs_cl);
464
465 G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
466 if (G1HotCardCache::default_use_cache()) {
467 // Apply the closure to the entries of the hot card cache.
468 G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
469 _g1h->iterate_hcc_closure(&refine_card_cl, worker_i);
470 }
471 // Apply the closure to all remaining log entries.
472 _g1h->iterate_dirty_card_closure(&refine_card_cl, worker_i);
473
474 G1GCPhaseTimes* p = _g1p->phase_times();
475 p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_scanned(), G1GCPhaseTimes::UpdateRSScannedCards);
476 p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_skipped(), G1GCPhaseTimes::UpdateRSSkippedCards);
477 }
478
// Delegate cleanup of the heap region remembered set internals.
void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}
482
483 void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss,
484 CodeBlobClosure* heap_region_codeblobs,
485 uint worker_i) {
486 update_rem_set(pss, worker_i);
487 scan_rem_set(pss, heap_region_codeblobs, worker_i);;
488 }
489
490 void G1RemSet::prepare_for_oops_into_collection_set_do() {
491 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
492 dcqs.concatenate_logs();
493
494 _scan_state->reset();
495 }
496
497 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
498 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
499
500 // Set all cards back to clean.
501 double start = os::elapsedTime();
502 _scan_state->clear_card_table(_g1h->workers());
503 phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
504 }
505
506 inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
507 #ifdef ASSERT
508 G1CollectedHeap* g1h = G1CollectedHeap::heap();
509 assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
510 "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
511 p2i(card_ptr),
512 ct->index_for(ct->addr_for(card_ptr)),
513 p2i(ct->addr_for(card_ptr)),
514 g1h->addr_to_region(ct->addr_for(card_ptr)));
515 #endif
516 }
517
518 void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
519 uint worker_i) {
520 assert(!_g1h->is_gc_active(), "Only call concurrently");
521
522 check_card_ptr(card_ptr, _ct);
523
524 // If the card is no longer dirty, nothing to do.
525 if (*card_ptr != G1CardTable::dirty_card_val()) {
526 return;
527 }
528
529 // Construct the region representing the card.
530 HeapWord* start = _ct->addr_for(card_ptr);
531 // And find the region containing it.
532 HeapRegion* r = _g1h->heap_region_containing(start);
533
534 // This check is needed for some uncommon cases where we should
535 // ignore the card.
536 //
537 // The region could be young. Cards for young regions are
538 // distinctly marked (set to g1_young_gen), so the post-barrier will
539 // filter them out. However, that marking is performed
540 // concurrently. A write to a young object could occur before the
541 // card has been marked young, slipping past the filter.
542 //
543 // The card could be stale, because the region has been freed since
544 // the card was recorded. In this case the region type could be
545 // anything. If (still) free or (reallocated) young, just ignore
546 // it. If (reallocated) old or humongous, the later card trimming
547 // and additional checks in iteration may detect staleness. At
548 // worst, we end up processing a stale card unnecessarily.
549 //
550 // In the normal (non-stale) case, the synchronization between the
551 // enqueueing of the card and processing it here will have ensured
552 // we see the up-to-date region type here.
557 // The result from the hot card cache insert call is either:
558 // * pointer to the current card
559 // (implying that the current card is not 'hot'),
560 // * null
561 // (meaning we had inserted the card ptr into the "hot" card cache,
562 // which had some headroom),
563 // * a pointer to a "hot" card that was evicted from the "hot" cache.
564 //
565
566 if (_hot_card_cache->use_cache()) {
567 assert(!SafepointSynchronize::is_at_safepoint(), "sanity");
568
569 const jbyte* orig_card_ptr = card_ptr;
570 card_ptr = _hot_card_cache->insert(card_ptr);
571 if (card_ptr == NULL) {
572 // There was no eviction. Nothing to do.
573 return;
574 } else if (card_ptr != orig_card_ptr) {
575 // Original card was inserted and an old card was evicted.
576 start = _ct->addr_for(card_ptr);
577 r = _g1h->heap_region_containing(start);
578
579 // Check whether the region formerly in the cache should be
580 // ignored, as discussed earlier for the original card. The
581 // region could have been freed while in the cache.
582 if (!r->is_old_or_humongous()) {
583 return;
584 }
585 } // Else we still have the original card.
586 }
587
588 // Trim the region designated by the card to what's been allocated
589 // in the region. The card could be stale, or the card could cover
590 // (part of) an object at the end of the allocated space and extend
591 // beyond the end of allocation.
592
593 // Non-humongous objects are only allocated in the old-gen during
594 // GC, so if region is old then top is stable. Humongous object
595 // allocation sets top last; if top has not yet been set, this is
596 // a stale card and we'll end up with an empty intersection. If
597 // this is not a stale card, the synchronization between the
606
607 // Okay to clean and process the card now. There are still some
608 // stale card cases that may be detected by iteration and dealt with
609 // as iteration failure.
610 *const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();
611
612 // This fence serves two purposes. First, the card must be cleaned
613 // before processing the contents. Second, we can't proceed with
614 // processing until after the read of top, for synchronization with
615 // possibly concurrent humongous object allocation. It's okay that
616 // reading top and reading type were racy wrto each other. We need
617 // both set, in any order, to proceed.
618 OrderAccess::fence();
619
620 // Don't use addr_for(card_ptr + 1) which can ask for
621 // a card beyond the heap.
622 HeapWord* end = start + G1CardTable::card_size_in_words;
623 MemRegion dirty_region(start, MIN2(scan_limit, end));
624 assert(!dirty_region.is_empty(), "sanity");
625
626 G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_i);
627
628 bool card_processed =
629 r->oops_on_card_seq_iterate_careful<false>(dirty_region, &conc_refine_cl);
630
631 // If unable to process the card then we encountered an unparsable
632 // part of the heap (e.g. a partially allocated object) while
633 // processing a stale card. Despite the card being stale, redirty
634 // and re-enqueue, because we've already cleaned the card. Without
635 // this we could incorrectly discard a non-stale card.
636 if (!card_processed) {
637 // The card might have gotten re-dirtied and re-enqueued while we
638 // worked. (In fact, it's pretty likely.)
639 if (*card_ptr != G1CardTable::dirty_card_val()) {
640 *card_ptr = G1CardTable::dirty_card_val();
641 MutexLockerEx x(Shared_DirtyCardQ_lock,
642 Mutex::_no_safepoint_check_flag);
643 DirtyCardQueue* sdcq =
644 JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
645 sdcq->enqueue(card_ptr);
646 }
647 } else {
648 _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
649 }
650 }
651
// Refine a single card during an evacuation pause, applying "update_rs_cl"
// to the oops in the heap range the card covers. Returns true if the card
// was scanned, false if it was skipped (already clean, or entirely above the
// region's scan limit).
bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
                                     G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
  assert(_g1h->is_gc_active(), "Only call during GC");

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do. This covers cards that were already
  // scanned as parts of the remembered sets.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return false;
  }

  // We claim lazily (so races are possible but they're benign), which reduces the
  // number of potential duplicate scans (multiple threads may enqueue the same card twice).
  *card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();

  // Construct the region representing the card.
  HeapWord* card_start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  uint const card_region_idx = _g1h->addr_to_region(card_start);

  // Record the region as dirty so its card table range is reset after the pause.
  _scan_state->add_dirty_region(card_region_idx);
  HeapWord* scan_limit = _scan_state->scan_top(card_region_idx);
  if (scan_limit <= card_start) {
    // If the card starts above the area in the region containing objects to scan, skip it.
    return false;
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
  // Trim the card's range to the scan limit; never scan above it.
  MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
  assert(!dirty_region.is_empty(), "sanity");

  HeapRegion* const card_region = _g1h->region_at(card_region_idx);
  update_rs_cl->set_region(card_region);
  bool card_processed = card_region->oops_on_card_seq_iterate_careful<true>(dirty_region, update_rs_cl);
  assert(card_processed, "must be");
  return true;
}
692
693 void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
694 if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
695 (period_count % G1SummarizeRSetStatsPeriod == 0)) {
696
697 G1RemSetSummary current(this);
698     _prev_period_summary.subtract_from(&current);
699
700 Log(gc, remset) log;
701 log.trace("%s", header);
702 ResourceMark rm;
703 LogStream ls(log.trace());
704 _prev_period_summary.print_on(&ls);
705
698     _prev_period_summary.set(&current);
|