< prev index next >

src/share/vm/gc/g1/g1RemSet.cpp

Print this page
rev 12056 : [mq]: simplify


 651   G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
 652 
 653   FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
 654                         (check_for_refs_into_cset ?
 655                                 (OopClosure*)&mux :
 656                                 (OopClosure*)&update_rs_oop_cl));
 657 
 658   // The region for the current card may be a young region. The
 659   // current card may have been a card that was evicted from the
 660   // card cache. When the card was inserted into the cache, we had
 661   // determined that its region was non-young. While in the cache,
 662   // the region may have been freed during a cleanup pause, reallocated
 663   // and tagged as young.
 664   //
 665   // We wish to filter out cards for such a region but the current
 666   // thread, if we're running concurrently, may "see" the young type
 667   // change at any time (so an earlier "is_young" check may pass or
 668   // fail arbitrarily). We tell the iteration code to perform this
 669   // filtering when it has been determined that there has been an actual
 670   // allocation in this region, making it safe to check the young type.
 671   bool filter_young = true;
 672 
 673   HeapWord* stop_point =
 674     r->oops_on_card_seq_iterate_careful(dirtyRegion,
 675                                         &filter_then_update_rs_oop_cl,
 676                                         filter_young,
 677                                         card_ptr);
 678 
 679   // If stop_point is non-null, then we encountered an unallocated region
 680   // (perhaps the unfilled portion of a TLAB).  For now, we'll dirty the
 681   // card and re-enqueue: if we put off the card until a GC pause, then the
 682   // unallocated portion will be filled in.  Alternatively, we might try
 683   // the full complexity of the technique used in "regular" precleaning.
 684   if (stop_point != NULL) {
 685     // The card might have gotten re-dirtied and re-enqueued while we
 686     // worked.  (In fact, it's pretty likely.)
 687     if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
 688       *card_ptr = CardTableModRefBS::dirty_card_val();
 689       MutexLockerEx x(Shared_DirtyCardQ_lock,
 690                       Mutex::_no_safepoint_check_flag);
 691       DirtyCardQueue* sdcq =
 692         JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
 693       sdcq->enqueue(card_ptr);
 694     }
 695   } else {
 696     _conc_refine_cards++;
 697   }
 698 
 699   // This gets set to true if the card being refined has
 700   // references that point into the collection set.
 701   bool has_refs_into_cset = trigger_cl.triggered();
 702 
 703   // We should only be detecting that the card contains references
 704   // that point into the collection set if the current thread is




 651   G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
 652 
 653   FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
 654                         (check_for_refs_into_cset ?
 655                                 (OopClosure*)&mux :
 656                                 (OopClosure*)&update_rs_oop_cl));
 657 
 658   // The region for the current card may be a young region. The
 659   // current card may have been a card that was evicted from the
 660   // card cache. When the card was inserted into the cache, we had
 661   // determined that its region was non-young. While in the cache,
 662   // the region may have been freed during a cleanup pause, reallocated
 663   // and tagged as young.
 664   //
 665   // We wish to filter out cards for such a region but the current
 666   // thread, if we're running concurrently, may "see" the young type
 667   // change at any time (so an earlier "is_young" check may pass or
 668   // fail arbitrarily). We tell the iteration code to perform this
 669   // filtering when it has been determined that there has been an actual
 670   // allocation in this region, making it safe to check the young type.

 671 
 672   bool card_processed =
 673     r->oops_on_card_seq_iterate_careful(dirtyRegion,
 674                                         &filter_then_update_rs_oop_cl,

 675                                         card_ptr);
 676 
 677   // If unable to process the card, then we encountered an unparsable
 678   // part of the heap (e.g. a partially allocated object).  Redirty
 679   // and re-enqueue: if we put off the card until a GC pause, then the
 680   // allocation will have completed.
 681   if (!card_processed) {
 682     assert(!_g1->is_gc_active(), "Unparsable heap during GC");
 683     // The card might have gotten re-dirtied and re-enqueued while we
 684     // worked.  (In fact, it's pretty likely.)
 685     if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
 686       *card_ptr = CardTableModRefBS::dirty_card_val();
 687       MutexLockerEx x(Shared_DirtyCardQ_lock,
 688                       Mutex::_no_safepoint_check_flag);
 689       DirtyCardQueue* sdcq =
 690         JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
 691       sdcq->enqueue(card_ptr);
 692     }
 693   } else {
 694     _conc_refine_cards++;
 695   }
 696 
 697   // This gets set to true if the card being refined has
 698   // references that point into the collection set.
 699   bool has_refs_into_cset = trigger_cl.triggered();
 700 
 701   // We should only be detecting that the card contains references
 702   // that point into the collection set if the current thread is


< prev index next >