src/share/vm/gc_implementation/g1/g1RemSet.cpp

Print this page




 699 
 700 class ConcRefineRegionClosure: public HeapRegionClosure {
 701   G1CollectedHeap* _g1h;
 702   CardTableModRefBS* _ctbs;
 703   ConcurrentGCThread* _cgc_thrd;
 704   ConcurrentG1Refine* _cg1r;
 705   unsigned _cards_processed;
 706   UpdateRSOopClosure _update_rs_oop_cl;
 707 public:
 708   ConcRefineRegionClosure(CardTableModRefBS* ctbs,
 709                           ConcurrentG1Refine* cg1r,
 710                           HRInto_G1RemSet* g1rs) :
 711     _ctbs(ctbs), _cg1r(cg1r), _cgc_thrd(cg1r->cg1rThread()),
 712     _update_rs_oop_cl(g1rs), _cards_processed(0),
 713     _g1h(G1CollectedHeap::heap())
 714   {}
 715 
 716   bool doHeapRegion(HeapRegion* r) {
 717     if (!r->in_collection_set() &&
 718         !r->continuesHumongous() &&
 719         !r->is_young() &&
 720         !r->is_survivor()) {
 721       _update_rs_oop_cl.set_from(r);
 722       UpdateRSObjectClosure update_rs_obj_cl(&_update_rs_oop_cl);
 723 
 724       // For each run of dirty card in the region:
 725       //   1) Clear the cards.
 726       //   2) Process the range corresponding to the run, adding any
 727       //      necessary RS entries.
 728       // 1 must precede 2, so that a concurrent modification redirties the
 729       // card.  If a processing attempt does not succeed, because it runs
 730       // into an unparseable region, we will do binary search to find the
 731       // beginning of the next parseable region.
 732       HeapWord* startAddr = r->bottom();
 733       HeapWord* endAddr = r->used_region().end();
 734       HeapWord* lastAddr;
 735       HeapWord* nextAddr;
 736 
 737       for (nextAddr = lastAddr = startAddr;
 738            nextAddr < endAddr;
 739            nextAddr = lastAddr) {
 740         MemRegion dirtyRegion;


 837 
 838   // Construct the region representing the card.
 839   HeapWord* start = _ct_bs->addr_for(card_ptr);
 840   // And find the region containing it.
 841   HeapRegion* r = _g1->heap_region_containing(start);
 842   if (r == NULL) {
 843     guarantee(_g1->is_in_permanent(start), "Or else where?");
 844     return;  // Not in the G1 heap (might be in perm, for example.)
 845   }
 846   // Why do we have to check here whether a card is on a young region,
 847   // given that we dirty young regions and, as a result, the
 848   // post-barrier is supposed to filter them out and never to enqueue
 849   // them? When we allocate a new region as the "allocation region" we
 850   // actually dirty its cards after we release the lock, since card
 851   // dirtying while holding the lock was a performance bottleneck. So,
 852   // as a result, it is possible for other threads to actually
 853   // allocate objects in the region (after they acquire the lock)
 854   // before all the cards on the region are dirtied. This is unlikely,
 855   // and it doesn't happen often, but it can happen. So, the extra
 856   // check below filters out those cards.
 857   if (r->is_young() || r->is_survivor()) {
 858     return;
 859   }
 860   // While we are processing RSet buffers during the collection, we
 861   // actually don't want to scan any cards on the collection set,
 862   // since we don't want to update remembered sets with entries that
 863   // point into the collection set, given that live objects from the
 864   // collection set are about to move and such entries will be stale
 865   // very soon. This change also deals with a reliability issue which
 866   // involves scanning a card in the collection set and coming across
 867   // an array that was being chunked and looking malformed. Note,
 868   // however, that if evacuation fails, we have to scan any objects
 869   // that were not moved and create any missing entries.
 870   if (r->in_collection_set()) {
 871     return;
 872   }
 873 
 874   // Should we defer it?
 875   if (_cg1r->use_cache()) {
 876     card_ptr = _cg1r->cache_insert(card_ptr);
 877     // If it was not an eviction, nothing to do.


1008                            "  Max = " SIZE_FORMAT "K.",
1009                            blk.total_mem_sz()/K, blk.max_mem_sz()/K);
1010     gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
1011                            " free_lists = " SIZE_FORMAT "K.",
1012                            HeapRegionRemSet::static_mem_size()/K,
1013                            HeapRegionRemSet::fl_mem_size()/K);
1014     gclog_or_tty->print_cr("    %d occupied cards represented.",
1015                            blk.occupied());
1016     gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
1017                            " %s, cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
1018                            blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
1019                            (blk.max_mem_sz_region()->popular() ? "POP" : ""),
1020                            (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
1021                            (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
1022     gclog_or_tty->print_cr("    Did %d coarsenings.",
1023                   HeapRegionRemSet::n_coarsenings());
1024 
1025   }
1026 }
1027 void HRInto_G1RemSet::prepare_for_verify() {
       // Flush all pending remembered-set update work before heap
       // verification so the verifier sees complete remembered sets.
       // Only done when explicitly enabled, when verifying before GC,
       // and not during a full collection.
1028   if (G1HRRSFlushLogBuffersOnVerify && VerifyBeforeGC && !_g1->full_collection()) {


       // NOTE(review): name suggests this scrubs the HeapRegion rem
       // sets; body not visible here -- confirm.
1029     cleanupHRRS();
       // Take the card-table refinement closure out of concurrent mode
       // while the queues are drained below.
1030     _g1->set_refine_cte_cl_concurrency(false);
1031     if (SafepointSynchronize::is_at_safepoint()) {
         // At a safepoint we can safely merge every Java thread's
         // dirty-card log into the global completed-buffer list.
1032       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1033       dcqs.concatenate_logs();
1034     }
       // Temporarily disable the refinement card cache so updateRS()
       // processes every card immediately instead of deferring some,
       // then restore the caller's setting.
1035     bool cg1r_use_cache = _cg1r->use_cache();
1036     _cg1r->set_use_cache(false);
1037     updateRS(0);
1038     _cg1r->set_use_cache(cg1r_use_cache);


1039   }
1040 }


 699 
 700 class ConcRefineRegionClosure: public HeapRegionClosure {
 701   G1CollectedHeap* _g1h;
 702   CardTableModRefBS* _ctbs;
 703   ConcurrentGCThread* _cgc_thrd;
 704   ConcurrentG1Refine* _cg1r;
 705   unsigned _cards_processed;
 706   UpdateRSOopClosure _update_rs_oop_cl;
 707 public:
 708   ConcRefineRegionClosure(CardTableModRefBS* ctbs,
 709                           ConcurrentG1Refine* cg1r,
 710                           HRInto_G1RemSet* g1rs) :
 711     _ctbs(ctbs), _cg1r(cg1r), _cgc_thrd(cg1r->cg1rThread()),
 712     _update_rs_oop_cl(g1rs), _cards_processed(0),
 713     _g1h(G1CollectedHeap::heap())
 714   {}
 715 
 716   bool doHeapRegion(HeapRegion* r) {
 717     if (!r->in_collection_set() &&
 718         !r->continuesHumongous() &&
 719         !r->is_young()) {

 720       _update_rs_oop_cl.set_from(r);
 721       UpdateRSObjectClosure update_rs_obj_cl(&_update_rs_oop_cl);
 722 
 723       // For each run of dirty card in the region:
 724       //   1) Clear the cards.
 725       //   2) Process the range corresponding to the run, adding any
 726       //      necessary RS entries.
 727       // 1 must precede 2, so that a concurrent modification redirties the
 728       // card.  If a processing attempt does not succeed, because it runs
 729       // into an unparseable region, we will do binary search to find the
 730       // beginning of the next parseable region.
 731       HeapWord* startAddr = r->bottom();
 732       HeapWord* endAddr = r->used_region().end();
 733       HeapWord* lastAddr;
 734       HeapWord* nextAddr;
 735 
 736       for (nextAddr = lastAddr = startAddr;
 737            nextAddr < endAddr;
 738            nextAddr = lastAddr) {
 739         MemRegion dirtyRegion;


 836 
 837   // Construct the region representing the card.
 838   HeapWord* start = _ct_bs->addr_for(card_ptr);
 839   // And find the region containing it.
 840   HeapRegion* r = _g1->heap_region_containing(start);
 841   if (r == NULL) {
 842     guarantee(_g1->is_in_permanent(start), "Or else where?");
 843     return;  // Not in the G1 heap (might be in perm, for example.)
 844   }
 845   // Why do we have to check here whether a card is on a young region,
 846   // given that we dirty young regions and, as a result, the
 847   // post-barrier is supposed to filter them out and never to enqueue
 848   // them? When we allocate a new region as the "allocation region" we
 849   // actually dirty its cards after we release the lock, since card
 850   // dirtying while holding the lock was a performance bottleneck. So,
 851   // as a result, it is possible for other threads to actually
 852   // allocate objects in the region (after they acquire the lock)
 853   // before all the cards on the region are dirtied. This is unlikely,
 854   // and it doesn't happen often, but it can happen. So, the extra
 855   // check below filters out those cards.
 856   if (r->is_young()) {
 857     return;
 858   }
 859   // While we are processing RSet buffers during the collection, we
 860   // actually don't want to scan any cards on the collection set,
 861   // since we don't want to update remembered sets with entries that
 862   // point into the collection set, given that live objects from the
 863   // collection set are about to move and such entries will be stale
 864   // very soon. This change also deals with a reliability issue which
 865   // involves scanning a card in the collection set and coming across
 866   // an array that was being chunked and looking malformed. Note,
 867   // however, that if evacuation fails, we have to scan any objects
 868   // that were not moved and create any missing entries.
 869   if (r->in_collection_set()) {
 870     return;
 871   }
 872 
 873   // Should we defer it?
 874   if (_cg1r->use_cache()) {
 875     card_ptr = _cg1r->cache_insert(card_ptr);
 876     // If it was not an eviction, nothing to do.


1007                            "  Max = " SIZE_FORMAT "K.",
1008                            blk.total_mem_sz()/K, blk.max_mem_sz()/K);
1009     gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
1010                            " free_lists = " SIZE_FORMAT "K.",
1011                            HeapRegionRemSet::static_mem_size()/K,
1012                            HeapRegionRemSet::fl_mem_size()/K);
1013     gclog_or_tty->print_cr("    %d occupied cards represented.",
1014                            blk.occupied());
1015     gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
1016                            " %s, cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
1017                            blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
1018                            (blk.max_mem_sz_region()->popular() ? "POP" : ""),
1019                            (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
1020                            (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
1021     gclog_or_tty->print_cr("    Did %d coarsenings.",
1022                   HeapRegionRemSet::n_coarsenings());
1023 
1024   }
1025 }
1026 void HRInto_G1RemSet::prepare_for_verify() {
       // Flush all pending remembered-set update work before heap
       // verification (whether verifying before or after GC) so the
       // verifier sees complete remembered sets. Only done when
       // explicitly enabled and not during a full collection.
1027   if (G1HRRSFlushLogBuffersOnVerify &&
1028       (VerifyBeforeGC || VerifyAfterGC)
1029       &&  !_g1->full_collection()) {
       // NOTE(review): name suggests this scrubs the HeapRegion rem
       // sets; body not visible here -- confirm.
1030     cleanupHRRS();
       // Take the card-table refinement closure out of concurrent mode
       // while the queues are drained below.
1031     _g1->set_refine_cte_cl_concurrency(false);
1032     if (SafepointSynchronize::is_at_safepoint()) {
         // At a safepoint we can safely merge every Java thread's
         // dirty-card log into the global completed-buffer list.
1033       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1034       dcqs.concatenate_logs();
1035     }
       // Temporarily disable the refinement card cache so updateRS()
       // processes every card immediately instead of deferring some,
       // then restore the caller's setting.
1036     bool cg1r_use_cache = _cg1r->use_cache();
1037     _cg1r->set_use_cache(false);
1038     updateRS(0);
1039     _cg1r->set_use_cache(cg1r_use_cache);
1040 
       // Everything queued above must have been consumed by updateRS().
1041     assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
1042   }
1043 }