src/share/vm/gc_implementation/g1/concurrentMark.cpp

 159       _bitmap->clearRange(mr);
 160 
 161       cur += chunk_size_in_words;
 162 
 163       // Abort iteration if, after yielding, the marking has been aborted.
 164       if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
 165         return true;
 166       }
 167       // Repeat the asserts from before the start of the closure. We do them
 168       // as asserts here to avoid their overhead in product builds. However,
 169       // we keep them as guarantees at the beginning / end of the bitmap
 170       // clearing to get some checking in product builds.
 171       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
 172       assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
 173     }
 174 
 175     return false;
 176   }
 177 };
 178 
 179 void CMBitMap::clearAll() {
 180   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
 181   G1CollectedHeap::heap()->heap_region_iterate(&cl);
 182   guarantee(cl.complete(), "Must have completed iteration.");
 183   return;
 184 }
 185 
 186 void CMBitMap::markRange(MemRegion mr) {
 187   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 188   assert(!mr.is_empty(), "unexpected empty region");
 189   assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 190           ((HeapWord *) mr.end())),
 191          "markRange memory region end is not card aligned");
 192   // convert address range into offset range
 193   _bm.at_put_range(heapWordToOffset(mr.start()),
 194                    heapWordToOffset(mr.end()), true);
 195 }
 196 
 197 void CMBitMap::clearRange(MemRegion mr) {
 198   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 199   assert(!mr.is_empty(), "unexpected empty region");
 200   // convert address range into offset range
 201   _bm.at_put_range(heapWordToOffset(mr.start()),


 854 
 855 ConcurrentMark::~ConcurrentMark() {
 856   // The ConcurrentMark instance is never freed.
 857   ShouldNotReachHere();
 858 }
 859 
 860 void ConcurrentMark::clearNextBitmap() {
 861   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 862 
 863   // Make sure that the concurrent mark thread still appears to be in
 864   // the current cycle.
 865   guarantee(cmThread()->during_cycle(), "invariant");
 866 
 867   // We are finishing up the current cycle by clearing the next
 868   // marking bitmap and getting it ready for the next cycle. During
 869   // this time no other cycle can start. So, let's make sure that this
 870   // is the case.
 871   guarantee(!g1h->mark_in_progress(), "invariant");
 872 
 873   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
 874   g1h->heap_region_iterate(&cl);
 875 
 876   // Clear the liveness counting data. If the marking has been aborted, the abort()
 877   // call already did that.
 878   if (cl.complete()) {
 879     clear_all_count_data();
 880   }
 881 
 882   // Repeat the asserts from above.
 883   guarantee(cmThread()->during_cycle(), "invariant");
 884   guarantee(!g1h->mark_in_progress(), "invariant");
 885 }
 886 
 887 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 888   CMBitMap* _bitmap;
 889   bool _error;
 890  public:
 891   CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
 892   }
 893 
 894   virtual bool doHeapRegion(HeapRegion* r) {




 159       _bitmap->clearRange(mr);
 160 
 161       cur += chunk_size_in_words;
 162 
 163       // Abort iteration if, after yielding, the marking has been aborted.
 164       if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
 165         return true;
 166       }
 167       // Repeat the asserts from before the start of the closure. We do them
 168       // as asserts here to avoid their overhead in product builds. However,
 169       // we keep them as guarantees at the beginning / end of the bitmap
 170       // clearing to get some checking in product builds.
 171       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
 172       assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
 173     }
 174 
 175     return false;
 176   }
 177 };
 178 
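In the ClearBitmapHRClosure fragment above, a region's slice of the bitmap is cleared one chunk at a time; after each chunk the closure may yield to a pending safepoint and bails out early if marking was aborted while it yielded. Below is a minimal, self-contained sketch of that chunk-and-yield pattern; Bitmap, clear_region_chunked() and yield_and_check_abort() are illustrative stand-ins, not G1 code.

  #include <cstddef>
  #include <vector>

  // Stand-in for the marking bitmap; only the clearing pattern matters here.
  struct Bitmap {
    std::vector<bool> bits;
    void clear_range(std::size_t begin, std::size_t end) {
      for (std::size_t i = begin; i < end; i++) bits[i] = false;
    }
  };

  // Clear [start, end) in fixed-size chunks, checking for an abort request
  // after every chunk so one long clearing pass cannot delay a safepoint.
  bool clear_region_chunked(Bitmap* bm, std::size_t start, std::size_t end,
                            std::size_t chunk, bool (*yield_and_check_abort)()) {
    for (std::size_t cur = start; cur < end; ) {
      std::size_t next = (end - cur > chunk) ? cur + chunk : end;
      bm->clear_range(cur, next);
      cur = next;
      if (yield_and_check_abort()) {
        return true;   // aborted; the caller must not assume the range is clear
      }
    }
    return false;      // completed normally
  }
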
 179 class ParClearNextMarkBitmapTask : public AbstractGangTask {
 180   ClearBitmapHRClosure* _cl;
 181   uint                  _workload;
 182   bool                  _suspendible; // If the task is suspendible, workers must join the STS.
 183 public:
 184   ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) : AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _suspendible(suspendible) {
 185     assert(n_workers > 0, "Must have at least one worker.");
 186     uint max_regions = G1CollectedHeap::heap()->max_regions();
 187     _workload = (max_regions + n_workers - 1) / n_workers;
 188     assert(n_workers * _workload >= max_regions, "Workloads should cover all regions.");
 189   }
 190 
 191   void work(uint worker_id) {
 192     if (_suspendible) {
 193       SuspendibleThreadSet::join();
 194     }
 195     uint start = worker_id * _workload;
 196     uint end = MIN2(start + _workload, G1CollectedHeap::heap()->max_regions());
 197     G1CollectedHeap::heap()->heap_region_iterate_range(_cl, start, end);
 198     if (_suspendible) {
 199       SuspendibleThreadSet::leave();
 200     }
 201   }
 202 };
 203 
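ParClearNextMarkBitmapTask partitions the region index space statically: _workload is the per-worker region count rounded up, so n_workers * _workload always covers max_regions, and worker i clears the half-open range [i * _workload, MIN2((i + 1) * _workload, max_regions)). A worked example of that arithmetic, with made-up region and worker counts:

  #include <algorithm>
  #include <cassert>
  #include <cstdio>

  int main() {
    const unsigned max_regions = 2048;  // hypothetical number of heap regions
    const unsigned n_workers   = 13;    // hypothetical active worker count
    const unsigned workload    = (max_regions + n_workers - 1) / n_workers;  // 158

    assert(n_workers * workload >= max_regions);  // every region is covered

    for (unsigned worker_id = 0; worker_id < n_workers; worker_id++) {
      unsigned start = worker_id * workload;
      unsigned end   = std::min(start + workload, max_regions);
      // Workers 0..11 clear 158 regions each; worker 12 clears the last 152.
      std::printf("worker %u clears regions [%u, %u)\n", worker_id, start, end);
    }
    return 0;
  }
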
 204 void CMBitMap::clearAll() {
 205   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 206   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
 207   if (g1h->use_parallel_gc_threads()) {
 208     uint n_workers = g1h->workers()->active_workers();
 209     ParClearNextMarkBitmapTask task(&cl, n_workers, false);
 210     g1h->workers()->run_task(&task);
 211   } else {
 212     g1h->heap_region_iterate(&cl);
 213   }
 214   guarantee(cl.complete(), "Must have completed iteration.");
 215   return;
 216 }
 217 
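clearAll() above constructs the closure with a NULL ConcurrentMark and may_yield == false, and runs the gang task with suspendible == false, so this path never joins the suspendible thread set. Passing NULL is safe because every use of _cm inside the closure is guarded by _may_yield: the yield check and both asserts short-circuit before _cm is touched. A reduced sketch of that guard pattern, using the hypothetical names Marker and Clearer:

  #include <cassert>
  #include <cstddef>

  struct Marker {
    bool yield_and_check_abort() { return false; }  // stand-in for the CM yield check
  };

  struct Clearer {
    Marker* _cm;        // may be NULL when _may_yield is false
    bool    _may_yield;

    Clearer(Marker* cm, bool may_yield) : _cm(cm), _may_yield(may_yield) {
      assert(!_may_yield || _cm != NULL);  // a marker is only needed when yielding
    }

    bool clear_chunk() {
      // ... clear one chunk of the bitmap ...
      // Guarded use: with _may_yield == false the && short-circuits, so
      // _cm is never dereferenced and Clearer(NULL, false) is safe.
      if (_may_yield && _cm->yield_and_check_abort()) {
        return true;   // abort the iteration
      }
      return false;
    }
  };
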
 218 void CMBitMap::markRange(MemRegion mr) {
 219   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 220   assert(!mr.is_empty(), "unexpected empty region");
 221   assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 222           ((HeapWord *) mr.end())),
 223          "markRange memory region end is not card aligned");
 224   // convert address range into offset range
 225   _bm.at_put_range(heapWordToOffset(mr.start()),
 226                    heapWordToOffset(mr.end()), true);
 227 }
 228 
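The alignment assert in markRange() works by round-tripping the end address through heapWordToOffset()/offsetToHeapWord(): converting to a bit offset drops any low-order bits below the bitmap's granularity, so converting back reproduces the original address only when it was already aligned. A small standalone illustration of that round-trip check; the shift value of 3 is made up for the example and is not G1's actual granularity:

  #include <cassert>
  #include <cstddef>

  // Hypothetical granularity: one bitmap bit covers 2^kShift heap words.
  const int kShift = 3;

  std::size_t word_to_offset(std::size_t word_index) { return word_index >> kShift; }
  std::size_t offset_to_word(std::size_t offset)     { return offset << kShift; }

  int main() {
    // 64 is a multiple of 2^3, so the round-trip is exact and the check holds.
    assert(offset_to_word(word_to_offset(64)) == 64);
    // 65 is not, so the round-trip drops the low bits; a markRange()-style
    // alignment assert would fire for an end address like this.
    assert(offset_to_word(word_to_offset(65)) != 65);
    return 0;
  }
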
 229 void CMBitMap::clearRange(MemRegion mr) {
 230   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 231   assert(!mr.is_empty(), "unexpected empty region");
 232   // convert address range into offset range
 233   _bm.at_put_range(heapWordToOffset(mr.start()),


 886 
 887 ConcurrentMark::~ConcurrentMark() {
 888   // The ConcurrentMark instance is never freed.
 889   ShouldNotReachHere();
 890 }
 891 
 892 void ConcurrentMark::clearNextBitmap() {
 893   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 894 
 895   // Make sure that the concurrent mark thread still appears to be in
 896   // the current cycle.
 897   guarantee(cmThread()->during_cycle(), "invariant");
 898 
 899   // We are finishing up the current cycle by clearing the next
 900   // marking bitmap and getting it ready for the next cycle. During
 901   // this time no other cycle can start. So, let's make sure that this
 902   // is the case.
 903   guarantee(!g1h->mark_in_progress(), "invariant");
 904 
 905   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
 906   if (use_parallel_marking_threads()) {
 907     ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
 908     _parallel_workers->run_task(&task);
 909   } else {
 910     SuspendibleThreadSetJoiner sts;
 911     g1h->heap_region_iterate(&cl);
 912   }
 913 
 914   // Clear the liveness counting data. If the marking has been aborted, the abort()
 915   // call already did that.
 916   if (cl.complete()) {
 917     clear_all_count_data();
 918   }
 919 
 920   // Repeat the asserts from above.
 921   guarantee(cmThread()->during_cycle(), "invariant");
 922   guarantee(!g1h->mark_in_progress(), "invariant");
 923 }
 924 
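Unlike clearAll(), clearNextBitmap() runs while the rest of the VM keeps going, so the clearing threads take part in the suspendible thread set: the parallel task is created with suspendible == true and brackets its work with SuspendibleThreadSet::join()/leave(), while the serial path uses the scoped SuspendibleThreadSetJoiner, which performs the same join/leave pair from its constructor and destructor. A minimal sketch of that scoped idiom, with the stand-in names ThreadSet and ScopedJoiner:

  // Stand-in for the suspendible thread set: join() registers the current
  // thread, leave() deregisters it.
  struct ThreadSet {
    static void join()  { /* register the current thread */ }
    static void leave() { /* deregister the current thread */ }
  };

  // RAII joiner: joining in the constructor and leaving in the destructor
  // guarantees leave() runs on every exit path of the enclosing scope.
  class ScopedJoiner {
  public:
    ScopedJoiner()  { ThreadSet::join(); }
    ~ScopedJoiner() { ThreadSet::leave(); }
  private:
    ScopedJoiner(const ScopedJoiner&);             // non-copyable
    ScopedJoiner& operator=(const ScopedJoiner&);  // non-assignable
  };

  void clear_serially() {
    ScopedJoiner joiner;  // joined for the duration of the iteration
    // ... iterate over the regions and clear the bitmap ...
  }                       // leave() runs here, even on an early return
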
 925 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 926   CMBitMap* _bitmap;
 927   bool _error;
 928  public:
 929   CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
 930   }
 931 
 932   virtual bool doHeapRegion(HeapRegion* r) {