< prev index next >

src/share/vm/gc/g1/g1ConcurrentMark.hpp

Print this page




 212 // while the cycle is active. Given that, during evacuation pauses, we
 213 // do not copy objects that are explicitly marked, what we have to do
 214 // for the root regions is to scan them and mark all objects reachable
 215 // from them. According to the SATB assumptions, we only need to visit
 216 // each object once during marking. So, as long as we finish this scan
 217 // before the next evacuation pause, we can copy the objects from the
 218 // root regions without having to mark them or do anything else to them.
 219 //
 220 // Currently, we only support root region scanning once (at the start
 221 // of the marking cycle) and the root regions are all the survivor
 222 // regions populated during the initial-mark pause.
 223 class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
 224 private:
 225   YoungList*           _young_list;
 226   G1ConcurrentMark*    _cm;
 227 
 228   volatile bool        _scan_in_progress;
 229   volatile bool        _should_abort;
 230   HeapRegion* volatile _next_survivor;
 231 


 232 public:
 233   G1CMRootRegions();
 234   // We actually do most of the initialization in this method.
 235   void init(G1CollectedHeap* g1h, G1ConcurrentMark* cm);
 236 
 237   // Reset the claiming / scanning of the root regions.
 238   void prepare_for_scan();
 239 
 240   // Forces claim_next() to return NULL so that the iteration aborts early.
 241   void abort() { _should_abort = true; }
 242 
 243   // Return true if CM threads are actively scanning root regions,
 244   // false otherwise.
 245   bool scan_in_progress() { return _scan_in_progress; }
 246 
 247   // Claim the next root region to scan atomically, or return NULL if
 248   // all have been claimed.
 249   HeapRegion* claim_next();


 250 
 251   // Flag that we're done with root region scanning and notify anyone
 252   // who's waiting on it. If aborted is false, assume that all regions
 253   // have been claimed.
 254   void scan_finished();
 255 
 256   // If CM threads are still scanning root regions, wait until they
 257   // are done. Return true if we had to wait, false otherwise.
 258   bool wait_until_scan_finished();
 259 };
 260 
 261 class ConcurrentMarkThread;
 262 
 263 class G1ConcurrentMark: public CHeapObj<mtGC> {
 264   friend class ConcurrentMarkThread;
 265   friend class G1ParNoteEndTask;
 266   friend class CalcLiveObjectsClosure;
 267   friend class G1CMRefProcTaskProxy;
 268   friend class G1CMRefProcTaskExecutor;
 269   friend class G1CMKeepAliveAndDrainClosure;




 212 // while the cycle is active. Given that, during evacuation pauses, we
 213 // do not copy objects that are explicitly marked, what we have to do
 214 // for the root regions is to scan them and mark all objects reachable
 215 // from them. According to the SATB assumptions, we only need to visit
 216 // each object once during marking. So, as long as we finish this scan
 217 // before the next evacuation pause, we can copy the objects from the
 218 // root regions without having to mark them or do anything else to them.
 219 //
 220 // Currently, we only support root region scanning once (at the start
 221 // of the marking cycle) and the root regions are all the survivor
 222 // regions populated during the initial-mark pause.
 223 class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
 224 private:
 225   YoungList*           _young_list;
 226   G1ConcurrentMark*    _cm;
 227 
 228   volatile bool        _scan_in_progress;
 229   volatile bool        _should_abort;
 230   HeapRegion* volatile _next_survivor;
 231 
 232   void notify_scan_done(); // Presumably wakes up waiters in wait_until_scan_finished() — verify in .cpp.
 233 
 234 public:
 235   G1CMRootRegions();
 236   // We actually do most of the initialization in this method.
 237   void init(G1CollectedHeap* g1h, G1ConcurrentMark* cm);
 238 
 239   // Reset the claiming / scanning of the root regions.
 240   void prepare_for_scan();
 241 
 242   // Forces claim_next() to return NULL so that the iteration aborts early.
 243   void abort() { _should_abort = true; }
 244 
 245   // Return true if CM threads are actively scanning root regions,
 246   // false otherwise.
 247   bool scan_in_progress() { return _scan_in_progress; }
 248 
 249   // Claim the next root region to scan atomically, or return NULL if
 250   // all have been claimed.
 251   HeapRegion* claim_next();
 252 
 253   void cancel_scan(); // NOTE(review): semantics vs. abort()/scan_finished() not visible here — see .cpp.
 254 
 255   // Flag that we're done with root region scanning and notify anyone
 256   // who's waiting on it. If aborted is false, assume that all regions
 257   // have been claimed.
 258   void scan_finished();
 259 
 260   // If CM threads are still scanning root regions, wait until they
 261   // are done. Return true if we had to wait, false otherwise.
 262   bool wait_until_scan_finished();
 263 };
 264 
 265 class ConcurrentMarkThread;
 266 
 267 class G1ConcurrentMark: public CHeapObj<mtGC> {
 268   friend class ConcurrentMarkThread;
 269   friend class G1ParNoteEndTask;
 270   friend class CalcLiveObjectsClosure;
 271   friend class G1CMRefProcTaskProxy;
 272   friend class G1CMRefProcTaskExecutor;
 273   friend class G1CMKeepAliveAndDrainClosure;


< prev index next >