< prev index

src/share/vm/gc_implementation/g1/concurrentMark.cpp

Print this page
rev 7315 : [mq]: 8049341
rev 7316 : [mq]: 8062943


 163       _bitmap->clearRange(mr);
 164 
 165       cur += chunk_size_in_words;
 166 
 167       // Abort iteration if after yielding the marking has been aborted.
 168       if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
 169         return true;
 170       }
 171       // Repeat the asserts from before the start of the closure. We will do them
 172       // as asserts here to minimize their overhead on the product. However, we
 173       // will have them as guarantees at the beginning / end of the bitmap
 174       // clearing to get some checking in the product.
 175       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
 176       assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
 177     }
 178 
 179     return false;
 180   }
 181 };
 182 




















 183 void CMBitMap::clearAll() {

 184   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
 185   G1CollectedHeap::heap()->heap_region_iterate(&cl);


 186   guarantee(cl.complete(), "Must have completed iteration.");
 187   return;
 188 }
 189 
 190 void CMBitMap::markRange(MemRegion mr) {
 191   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 192   assert(!mr.is_empty(), "unexpected empty region");
 193   assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 194           ((HeapWord *) mr.end())),
 195          "markRange memory region end is not card aligned");
 196   // convert address range into offset range
 197   _bm.at_put_range(heapWordToOffset(mr.start()),
 198                    heapWordToOffset(mr.end()), true);
 199 }
 200 
 201 void CMBitMap::clearRange(MemRegion mr) {
 202   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 203   assert(!mr.is_empty(), "unexpected empty region");
 204   // convert address range into offset range
 205   _bm.at_put_range(heapWordToOffset(mr.start()),


 844 
// The ConcurrentMark instance lives for the lifetime of the VM and is
// never destroyed; reaching this destructor indicates a programming
// error, so it aborts immediately.
ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}
 849 
// Clear the next marking bitmap at the end of the current cycle, making
// it ready for the next cycle. The clearing closure may yield, so the
// iteration can be aborted part-way if marking is aborted; the
// guarantees before and after the iteration pin down the invariant that
// we stay inside the current cycle for the whole operation.
void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // may_yield == true: the closure periodically yields to pending
  // safepoints and returns early if marking has been aborted.
  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  g1h->heap_region_iterate(&cl);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}
 876 
 877 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 878   CMBitMap* _bitmap;
 879   bool _error;
 880  public:
 881   CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
 882   }
 883 
 884   virtual bool doHeapRegion(HeapRegion* r) {




 163       _bitmap->clearRange(mr);
 164 
 165       cur += chunk_size_in_words;
 166 
 167       // Abort iteration if after yielding the marking has been aborted.
 168       if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
 169         return true;
 170       }
 171       // Repeat the asserts from before the start of the closure. We will do them
 172       // as asserts here to minimize their overhead on the product. However, we
 173       // will have them as guarantees at the beginning / end of the bitmap
 174       // clearing to get some checking in the product.
 175       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
 176       assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
 177     }
 178 
 179     return false;
 180   }
 181 };
 182 
 183 class ParClearNextMarkBitmapTask : public AbstractGangTask {
 184   ClearBitmapHRClosure* _cl;
 185   HeapRegionClaimer     _hrclaimer;
 186   bool                  _suspendible; // If the task is suspendible, workers must join the STS.
 187 
 188 public:
 189   ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
 190       _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}
 191 
 192   void work(uint worker_id) {
 193     if (_suspendible) {
 194       SuspendibleThreadSet::join();
 195     }
 196     G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
 197     if (_suspendible) {
 198       SuspendibleThreadSet::leave();
 199     }
 200   }
 201 };
 202 
 203 void CMBitMap::clearAll() {
 204   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 205   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
 206   uint n_workers = g1h->workers()->active_workers();
 207   ParClearNextMarkBitmapTask task(&cl, n_workers, false);
 208   g1h->workers()->run_task(&task);
 209   guarantee(cl.complete(), "Must have completed iteration.");
 210   return;
 211 }
 212 
 213 void CMBitMap::markRange(MemRegion mr) {
 214   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 215   assert(!mr.is_empty(), "unexpected empty region");
 216   assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 217           ((HeapWord *) mr.end())),
 218          "markRange memory region end is not card aligned");
 219   // convert address range into offset range
 220   _bm.at_put_range(heapWordToOffset(mr.start()),
 221                    heapWordToOffset(mr.end()), true);
 222 }
 223 
 224 void CMBitMap::clearRange(MemRegion mr) {
 225   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 226   assert(!mr.is_empty(), "unexpected empty region");
 227   // convert address range into offset range
 228   _bm.at_put_range(heapWordToOffset(mr.start()),


 867 
// The ConcurrentMark instance lives for the lifetime of the VM and is
// never destroyed; reaching this destructor indicates a programming
// error, so it aborts immediately.
ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}
 872 
// Clear the next marking bitmap at the end of the current cycle, in
// parallel using the concurrent marking workers. The clearing closure
// may yield, so the iteration can be aborted part-way if marking is
// aborted; the guarantees before and after the parallel task pin down
// the invariant that we stay inside the current cycle throughout.
void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // may_yield == true and suspendible == true: workers join the STS so
  // the closure's yield checks can cooperate with pending safepoints.
  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}
 900 
 901 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 902   CMBitMap* _bitmap;
 903   bool _error;
 904  public:
 905   CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
 906   }
 907 
 908   virtual bool doHeapRegion(HeapRegion* r) {


< prev index