src/share/vm/gc/g1/g1ConcurrentMark.cpp (old)

 355     // Read it again in case it changed while we were waiting for the lock.
 356     res = _next_survivor;
 357     if (res != NULL) {
 358       if (res == _young_list->last_survivor_region()) {
 359         // We just claimed the last survivor so store NULL to indicate
 360         // that we're done.
 361         _next_survivor = NULL;
 362       } else {
 363         _next_survivor = res->get_next_young_region();
 364       }
 365     } else {
 366       // Someone else claimed the last survivor while we were trying
 367       // to take the lock, so there is nothing else to do.
 368     }
 369   }
 370   assert(res == NULL || res->is_survivor(), "post-condition");
 371 
 372   return res;
 373 }
 374 
 375 void G1CMRootRegions::scan_finished() {
 376   assert(scan_in_progress(), "pre-condition");
 377 
 378   // Currently, only survivors can be root regions.
 379   if (!_should_abort) {
 380     assert(_next_survivor == NULL, "we should have claimed all survivors");
 381   }
 382   _next_survivor = NULL;
 383 
 384   {
 385     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 386     _scan_in_progress = false;
 387     RootRegionScan_lock->notify_all();
 388   }
 389 }
 390 
 391 bool G1CMRootRegions::wait_until_scan_finished() {
 392   if (!scan_in_progress()) return false;
 393 
 394   {
 395     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 396     while (scan_in_progress()) {
 397       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 398     }
 399   }
 400   return true;
 401 }
 402 
 403 uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 404   return MAX2((n_par_threads + 2) / 4, 1U);
 405 }
 406 
 407 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
 408   _g1h(g1h),


 961   G1ConcurrentMark* _cm;
 962 
 963 public:
 964   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 965     AbstractGangTask("Root Region Scan"), _cm(cm) { }
 966 
 967   void work(uint worker_id) {
 968     assert(Thread::current()->is_ConcurrentGC_thread(),
 969            "this should only be done by a conc GC thread");
 970 
 971     G1CMRootRegions* root_regions = _cm->root_regions();
 972     HeapRegion* hr = root_regions->claim_next();
 973     while (hr != NULL) {
 974       _cm->scanRootRegion(hr, worker_id);
 975       hr = root_regions->claim_next();
 976     }
 977   }
 978 };
 979 
 980 void G1ConcurrentMark::scanRootRegions() {
 981   // Start of concurrent marking.
 982   ClassLoaderDataGraph::clear_claimed_marks();
 983 
 984   // scan_in_progress() will have been set to true only if there was
 985   // at least one root region to scan. So, if it's false, we
 986   // should not attempt to do any further work.
 987   if (root_regions()->scan_in_progress()) {
 988     GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");
 989 
 990     _parallel_marking_threads = calc_parallel_marking_threads();
 991     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
 992            "Maximum number of marking threads exceeded");
 993     uint active_workers = MAX2(1U, parallel_marking_threads());
 994 
 995     G1CMRootRegionScanTask task(this);
 996     _parallel_workers->set_active_workers(active_workers);
 997     _parallel_workers->run_task(&task);
 998 
 999     // It's possible that has_aborted() is true here without actually
1000     // aborting the survivor scan earlier. This is OK as it's
1001     // mainly used for sanity checking.
1002     root_regions()->scan_finished();
1003   }
1004 }
1005 
1006 void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
1007   assert(!_concurrent_phase_started, "Sanity");

src/share/vm/gc/g1/g1ConcurrentMark.cpp (new)

 355     // Read it again in case it changed while we were waiting for the lock.
 356     res = _next_survivor;
 357     if (res != NULL) {
 358       if (res == _young_list->last_survivor_region()) {
 359         // We just claimed the last survivor so store NULL to indicate
 360         // that we're done.
 361         _next_survivor = NULL;
 362       } else {
 363         _next_survivor = res->get_next_young_region();
 364       }
 365     } else {
 366       // Someone else claimed the last survivor while we were trying
 367       // to take the lock, so there is nothing else to do.
 368     }
 369   }
 370   assert(res == NULL || res->is_survivor(), "post-condition");
 371 
 372   return res;
 373 }
 374 
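The tail of G1CMRootRegions::claim_next() above hands out survivor regions one at a time under RootRegionScan_lock: the shared cursor is re-read once the lock is held, and storing NULL after the last region is claimed tells later claimants the list is drained. A minimal stand-alone sketch of the same claim-under-lock pattern, using std::mutex and hypothetical Region/RegionClaimer types rather than the HotSpot ones:

    #include <mutex>

    // Hypothetical stand-ins for HeapRegion and the young list.
    struct Region {
      Region* next = nullptr;
    };

    struct RegionClaimer {
      std::mutex lock;               // plays the role of RootRegionScan_lock
      Region* next_region = nullptr; // shared cursor, advanced under the lock
      Region* last_region = nullptr; // end marker, like last_survivor_region()

      // Claim one region for the calling worker, or return nullptr when
      // the list has been exhausted (possibly by another claimant).
      Region* claim_next() {
        std::lock_guard<std::mutex> x(lock);
        Region* res = next_region;   // re-read under the lock
        if (res != nullptr) {
          // Store nullptr once the last region is claimed, so that later
          // callers see an empty list.
          next_region = (res == last_region) ? nullptr : res->next;
        }
        return res;
      }
    };
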
 375 void G1CMRootRegions::notify_scan_done() {
 376   MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 377   _scan_in_progress = false;
 378   RootRegionScan_lock->notify_all();
 379 }
 380 
 381 void G1CMRootRegions::cancel_scan() {
 382   notify_scan_done();
 383 }
 384 
 385 void G1CMRootRegions::scan_finished() {
 386   assert(scan_in_progress(), "pre-condition");
 387 
 388   // Currently, only survivors can be root regions.
 389   if (!_should_abort) {
 390     assert(_next_survivor == NULL, "we should have claimed all survivors");
 391   }
 392   _next_survivor = NULL;
 393 
 394   notify_scan_done();
 395 }
 396 
 397 bool G1CMRootRegions::wait_until_scan_finished() {
 398   if (!scan_in_progress()) return false;
 399 
 400   {
 401     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 402     while (scan_in_progress()) {
 403       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 404     }
 405   }
 406   return true;
 407 }
 408 
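notify_scan_done() and wait_until_scan_finished() form a classic monitor handshake: the flag is cleared while holding the lock, notify_all() wakes every waiter, and waiters loop on the predicate so spurious wakeups are harmless. A sketch of the same handshake with a standard condition variable (ScanBarrier and its members are illustrative names, not the HotSpot API):

    #include <condition_variable>
    #include <mutex>

    class ScanBarrier {
      std::mutex lock;
      std::condition_variable cv;
      bool scan_in_progress = false;

    public:
      void scan_started() {
        std::lock_guard<std::mutex> x(lock);
        scan_in_progress = true;
      }

      // Mirrors notify_scan_done(): clear the flag under the lock, then
      // wake every waiter so it can re-check the predicate.
      void notify_scan_done() {
        std::lock_guard<std::mutex> x(lock);
        scan_in_progress = false;
        cv.notify_all();
      }

      // Mirrors wait_until_scan_finished(); the predicate passed to wait()
      // plays the role of the while loop in the HotSpot version.
      bool wait_until_scan_finished() {
        std::unique_lock<std::mutex> x(lock);
        if (!scan_in_progress) return false;
        cv.wait(x, [this] { return !scan_in_progress; });
        return true;
      }
    };
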
 409 uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 410   return MAX2((n_par_threads + 2) / 4, 1U);
 411 }
 412 
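scale_parallel_threads() asks for roughly a quarter of the parallel marking threads, rounded to the nearest integer (halves round up), but never zero; the +2 before the division is what does the rounding. A quick check of the arithmetic, with MAX2 spelled as std::max:

    #include <algorithm>
    #include <cstdio>

    // Same expression as scale_parallel_threads().
    static unsigned scale_parallel_threads(unsigned n_par_threads) {
      return std::max((n_par_threads + 2) / 4, 1u);
    }

    int main() {
      // Prints: 1 -> 1, 2 -> 1, 6 -> 2, 8 -> 2, 13 -> 3
      for (unsigned n : {1u, 2u, 6u, 8u, 13u}) {
        std::printf("%u -> %u\n", n, scale_parallel_threads(n));
      }
    }
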
 413 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
 414   _g1h(g1h),


 967   G1ConcurrentMark* _cm;
 968 
 969 public:
 970   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 971     AbstractGangTask("Root Region Scan"), _cm(cm) { }
 972 
 973   void work(uint worker_id) {
 974     assert(Thread::current()->is_ConcurrentGC_thread(),
 975            "this should only be done by a conc GC thread");
 976 
 977     G1CMRootRegions* root_regions = _cm->root_regions();
 978     HeapRegion* hr = root_regions->claim_next();
 979     while (hr != NULL) {
 980       _cm->scanRootRegion(hr, worker_id);
 981       hr = root_regions->claim_next();
 982     }
 983   }
 984 };
 985 
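G1CMRootRegionScanTask::work() is a claim-until-empty loop: each worker repeatedly takes the next unclaimed root region and scans it, so the load balances itself across workers of different speeds. A self-contained sketch of the same pattern, with an atomic fetch-add index standing in for the lock-based claimer above and scan_root_region as a placeholder:

    #include <atomic>
    #include <cstddef>
    #include <thread>
    #include <vector>

    struct Region { int id; };

    static std::atomic<size_t> next_index{0};

    // Hand out regions in claim order; nullptr means the list is drained.
    static Region* claim_next(std::vector<Region>& regions) {
      size_t i = next_index.fetch_add(1);
      return i < regions.size() ? &regions[i] : nullptr;
    }

    static void scan_root_region(Region*, unsigned /*worker_id*/) {
      // Placeholder for the per-region scanning work.
    }

    // The shape of G1CMRootRegionScanTask::work(): claim, scan, repeat.
    static void work(std::vector<Region>& regions, unsigned worker_id) {
      for (Region* r = claim_next(regions); r != nullptr;
           r = claim_next(regions)) {
        scan_root_region(r, worker_id);
      }
    }

    int main() {
      std::vector<Region> regions(16);
      std::vector<std::thread> workers;
      for (unsigned w = 0; w < 4; w++) {
        workers.emplace_back(work, std::ref(regions), w);
      }
      for (auto& t : workers) t.join();
    }
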
 986 void G1ConcurrentMark::scanRootRegions() {
 987   // scan_in_progress() will have been set to true only if there was
 988   // at least one root region to scan. So, if it's false, we
 989   // should not attempt to do any further work.
 990   if (root_regions()->scan_in_progress()) {
 991     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 992     GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");
 993 
 994     _parallel_marking_threads = calc_parallel_marking_threads();
 995     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
 996            "Maximum number of marking threads exceeded");
 997     uint active_workers = MAX2(1U, parallel_marking_threads());
 998 
 999     G1CMRootRegionScanTask task(this);
1000     _parallel_workers->set_active_workers(active_workers);
1001     _parallel_workers->run_task(&task);
1002 
1003     // It's possible that has_aborted() is true here without actually
1004     // aborting the survivor scan earlier. This is OK as it's
1005     // mainly used for sanity checking.
1006     root_regions()->scan_finished();
1007   }
1008 }
1009 
1010 void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
1011   assert(!_concurrent_phase_started, "Sanity");