
src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

rev 52719 : [mq]: 8159440-marking-of-promoted-objects-to-concurrent


 239   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
 240 
 241   if (cur == NULL) {
 242     return false;
 243   }
 244 
 245   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 246 
 247   add_chunk_to_free_list(cur);
 248   return true;
 249 }
 250 
 251 void G1CMMarkStack::set_empty() {
 252   _chunks_in_chunk_list = 0;
 253   _hwm = 0;
 254   _chunk_list = NULL;
 255   _free_list = NULL;
 256 }
 257 
 258 G1CMRootRegions::G1CMRootRegions() :
 259   _survivors(NULL), _cm(NULL), _scan_in_progress(false),
 260   _should_abort(false), _claimed_survivor_index(0) { }
 261 
 262 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
 263   _survivors = survivors;
 264   _cm = cm;












 265 }
 266 
 267 void G1CMRootRegions::prepare_for_scan() {
 268   assert(!scan_in_progress(), "pre-condition");
 269 
 270   // Currently, only survivors can be root regions.
 271   _claimed_survivor_index = 0;
 272   _scan_in_progress = _survivors->regions()->is_nonempty();
 273   _should_abort = false;
 274 }
 275 
 276 HeapRegion* G1CMRootRegions::claim_next() {
 277   if (_should_abort) {
 278     // If someone has set the should_abort flag, we return NULL to
 279     // force the caller to bail out of their loop.
 280     return NULL;
 281   }
 282 
 283   // Currently, only survivors can be root regions.
 284   const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

 285 
 286   int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
 287   if (claimed_index < survivor_regions->length()) {
 288     return survivor_regions->at(claimed_index);
 289   }
 290   return NULL;
 291 }
 292 
 293 uint G1CMRootRegions::num_root_regions() const {
 294   return (uint)_survivors->regions()->length();
 295 }
 296 
 297 void G1CMRootRegions::notify_scan_done() {
 298   MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 299   _scan_in_progress = false;
 300   RootRegionScan_lock->notify_all();
 301 }
 302 
 303 void G1CMRootRegions::cancel_scan() {
 304   notify_scan_done();
 305 }
 306 
 307 void G1CMRootRegions::scan_finished() {
 308   assert(scan_in_progress(), "pre-condition");
 309 
 310   // Currently, only survivors can be root regions.
 311   if (!_should_abort) {
 312     assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
 313     assert((uint)_claimed_survivor_index >= _survivors->length(),
 314            "we should have claimed all survivors, claimed index = %u, length = %u",
 315            (uint)_claimed_survivor_index, _survivors->length());
 316   }
 317 





 318   notify_scan_done();
 319 }
 320 
 321 bool G1CMRootRegions::wait_until_scan_finished() {
 322   if (!scan_in_progress()) {
 323     return false;
 324   }
 325 
 326   {
 327     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 328     while (scan_in_progress()) {
 329       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 330     }
 331   }
 332   return true;
 333 }
 334 
 335 // Returns the maximum number of workers to be used in a concurrent
 336 // phase based on the number of GC workers being used in a STW
 337 // phase.


 389   _accum_task_vtime(NULL),
 390 
 391   _concurrent_workers(NULL),
 392   _num_concurrent_workers(0),
 393   _max_concurrent_workers(0),
 394 
 395   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
 396   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 397 {
 398   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 399   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 400 
 401   // Create & start ConcurrentMark thread.
 402   _cm_thread = new G1ConcurrentMarkThread(this);
 403   if (_cm_thread->osthread() == NULL) {
 404     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 405   }
 406 
 407   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 408 
 409   _root_regions.init(_g1h->survivor(), this);
 410 
 411   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
 412     // Calculate the number of concurrent worker threads by scaling
 413     // the number of parallel GC threads.
 414     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
 415     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 416   }
 417 
 418   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
 419   if (ConcGCThreads > ParallelGCThreads) {
 420     log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
 421                     ConcGCThreads, ParallelGCThreads);
 422     return;
 423   }
 424 
 425   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 426   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 427 
 428   _num_concurrent_workers = ConcGCThreads;
 429   _max_concurrent_workers = _num_concurrent_workers;
 430 


 711 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 712   assert_at_safepoint_on_vm_thread();
 713   clear_bitmap(_prev_mark_bitmap, workers, false);
 714 }
 715 
 716 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 717 public:
 718   bool do_heap_region(HeapRegion* r) {
 719     r->note_start_of_marking();
 720     return false;
 721   }
 722 };
 723 
 724 void G1ConcurrentMark::pre_initial_mark() {
 725   // Initialize marking structures. This has to be done in a STW phase.
 726   reset();
 727 
 728   // For each region note start of marking.
 729   NoteStartOfMarkHRClosure startcl;
 730   _g1h->heap_region_iterate(&startcl);


 731 }
 732 
 733 
 734 void G1ConcurrentMark::post_initial_mark() {
 735   // Start Concurrent Marking weak-reference discovery.
 736   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 737   // enable ("weak") refs discovery
 738   rp->enable_discovery();
 739   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 740 
 741   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
 742   // This is the start of the marking cycle; we expect all
 743   // threads to have SATB queues with active set to false.
 744   satb_mq_set.set_active_all_threads(true, /* new active value */
 745                                      false /* expected_active */);
 746 
 747   _root_regions.prepare_for_scan();
 748 
 749   // update_g1_committed() will be called at the end of an evac pause
 750   // when marking is on. So, it's also called at the end of the


 842   if (!UseDynamicNumberOfGCThreads ||
 843       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 844        !ForceDynamicNumberOfGCThreads)) {
 845     result = _max_concurrent_workers;
 846   } else {
 847     result =
 848       AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
 849                                                       1, /* Minimum workers */
 850                                                       _num_concurrent_workers,
 851                                                       Threads::number_of_non_daemon_threads());
 852     // Don't scale the result down by scale_concurrent_workers() because
 853     // that scaling has already gone into "_max_concurrent_workers".
 854   }
 855   assert(result > 0 && result <= _max_concurrent_workers,
 856          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 857          _max_concurrent_workers, result);
 858   return result;
 859 }
 860 
 861 void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
 862   // Currently, only survivors can be root regions.
 863   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
 864   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 865 
 866   const uintx interval = PrefetchScanIntervalInBytes;
 867   HeapWord* curr = hr->bottom();
 868   const HeapWord* end = hr->top();
 869   while (curr < end) {
 870     Prefetch::read(curr, interval);
 871     oop obj = oop(curr);
 872     int size = obj->oop_iterate_size(&cl);
 873     assert(size == obj->size(), "sanity");
 874     curr += size;
 875   }
 876 }
 877 
 878 class G1CMRootRegionScanTask : public AbstractGangTask {
 879   G1ConcurrentMark* _cm;
 880 public:
 881   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 882     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
 883 
 884   void work(uint worker_id) {
 885     assert(Thread::current()->is_ConcurrentGC_thread(),
 886            "this should only be done by a conc GC thread");
 887 




 239   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
 240 
 241   if (cur == NULL) {
 242     return false;
 243   }
 244 
 245   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 246 
 247   add_chunk_to_free_list(cur);
 248   return true;
 249 }
 250 
 251 void G1CMMarkStack::set_empty() {
 252   _chunks_in_chunk_list = 0;
 253   _hwm = 0;
 254   _chunk_list = NULL;
 255   _free_list = NULL;
 256 }
 257 
 258 G1CMRootRegions::G1CMRootRegions() :
 259   _root_regions(NULL),
 260   _max_regions(0),
 261   _cur_regions(0),
 262   _scan_in_progress(false),
 263   _should_abort(false),
 264   _claimed_root_regions(0) { }
 265 
 266 void G1CMRootRegions::reset(const uint max_regions) {
 267   _root_regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC);
 268   _max_regions = max_regions;
 269   _cur_regions = 0;
 270 }
 271 
 272 void G1CMRootRegions::add(HeapRegion* hr) {
 273   assert_at_safepoint();
 274   size_t idx = Atomic::add((size_t)1, &_cur_regions) - 1;
 275   assert(idx < _max_regions, "Trying to add more root regions than there is space for, max %u", _max_regions);
 276   _root_regions[idx] = hr;
 277 }
 278 
 279 void G1CMRootRegions::prepare_for_scan() {
 280   assert(!scan_in_progress(), "pre-condition");
 281 
 282   _scan_in_progress = _cur_regions > 0;
 283 
 284   _claimed_root_regions = 0;
 285   _should_abort = false;
 286 }
 287 
 288 HeapRegion* G1CMRootRegions::claim_next() {
 289   if (_should_abort) {
 290     // If someone has set the should_abort flag, we return NULL to
 291     // force the caller to bail out of their loop.
 292     return NULL;
 293   }
 294 
 295   if (_claimed_root_regions >= _cur_regions) {
 296     return NULL;
 297   }
 298 
 299   size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
 300   if (claimed_index < _cur_regions) {
 301     return _root_regions[claimed_index];
 302   }
 303   return NULL;
 304 }
 305 
 306 uint G1CMRootRegions::num_root_regions() const {
 307   return (uint)_cur_regions;
 308 }
 309 
 310 void G1CMRootRegions::notify_scan_done() {
 311   MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 312   _scan_in_progress = false;
 313   RootRegionScan_lock->notify_all();
 314 }
 315 
 316 void G1CMRootRegions::cancel_scan() {
 317   notify_scan_done();
 318 }
 319 
 320 void G1CMRootRegions::scan_finished() {
 321   assert(scan_in_progress(), "pre-condition");
 322 

 323   if (!_should_abort) {
 324     assert(_claimed_root_regions >= num_root_regions(),
 325            "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
 326            _claimed_root_regions, num_root_regions());

 327   }
 328 
 329   FREE_C_HEAP_ARRAY(HeapRegion*, _root_regions);
 330   _root_regions = NULL;
 331   _max_regions = 0;
 332   _cur_regions = 0;
 333 
 334   notify_scan_done();
 335 }
 336 
 337 bool G1CMRootRegions::wait_until_scan_finished() {
 338   if (!scan_in_progress()) {
 339     return false;
 340   }
 341 
 342   {
 343     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 344     while (scan_in_progress()) {
 345       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 346     }
 347   }
 348   return true;
 349 }
 350 
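To make the new G1CMRootRegions life cycle above easier to follow, here is a minimal standalone sketch assembled only from the calls visible on this page: reset() from pre_initial_mark(), add() at a safepoint, prepare_for_scan() from post_initial_mark(), and claim_next() from the scanning workers. RootRegionSetModel and Region are illustrative stand-ins rather than HotSpot types, and the _should_abort and scan-done notification paths are omitted.

// Standalone model of the array-backed root region set this patch introduces
// (names mirror G1CMRootRegions, but the types below are stand-ins, not HotSpot code).
#include <atomic>
#include <cassert>
#include <cstddef>
#include <vector>

struct Region { int id; };                 // stand-in for HeapRegion*

class RootRegionSetModel {
  std::vector<Region*> _root_regions;      // pre-sized backing array
  std::atomic<size_t>  _cur_regions{0};    // number of filled slots
  std::atomic<size_t>  _claimed{0};        // next slot to hand out
  bool _scan_in_progress = false;

public:
  void reset(size_t max_regions) {         // pre_initial_mark(): size the array
    _root_regions.assign(max_regions, nullptr);
    _cur_regions = 0;
  }

  void add(Region* r) {                    // at a safepoint: register a root region
    size_t idx = _cur_regions.fetch_add(1);
    assert(idx < _root_regions.size() && "more root regions than space");
    _root_regions[idx] = r;
  }

  void prepare_for_scan() {                // post_initial_mark(): arm the scan
    _scan_in_progress = _cur_regions.load() > 0;
    _claimed = 0;
  }

  Region* claim_next() {                   // concurrent workers race here
    if (_claimed.load() >= _cur_regions.load()) {
      return nullptr;                      // everything already claimed
    }
    size_t idx = _claimed.fetch_add(1);
    return idx < _cur_regions.load() ? _root_regions[idx] : nullptr;
  }

  bool scan_in_progress() const { return _scan_in_progress; }
};

int main() {
  RootRegionSetModel set;
  set.reset(4);                            // capacity, like max_regions()
  Region a{0}, b{1};
  set.add(&a);
  set.add(&b);
  set.prepare_for_scan();
  while (Region* r = set.claim_next()) {   // each region is handed out exactly once
    (void)r;                               // a real worker would scan it here
  }
  return 0;
}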
 351 // Returns the maximum number of workers to be used in a concurrent
 352 // phase based on the number of GC workers being used in a STW
 353 // phase.
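The function introduced by the comment above falls outside the diff context. As a hedged sketch of the kind of scaling it describes (an assumed formula, not a quote of the elided body): take roughly a quarter of the STW worker count and never go below one. The constructor further down only applies the result when ConcGCThreads was not set explicitly.

// Hedged sketch only: one plausible way to derive the concurrent worker count
// from the STW (ParallelGCThreads) worker count. The exact formula lives in
// the elided function body and may differ; treat this as an assumption.
static unsigned scale_concurrent_worker_threads(unsigned num_gc_workers) {
  unsigned scaled = (num_gc_workers + 2) / 4;  // roughly a quarter of the STW workers
  return scaled > 0 ? scaled : 1;              // always keep at least one marker
}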


 405   _accum_task_vtime(NULL),
 406 
 407   _concurrent_workers(NULL),
 408   _num_concurrent_workers(0),
 409   _max_concurrent_workers(0),
 410 
 411   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
 412   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 413 {
 414   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 415   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 416 
 417   // Create & start ConcurrentMark thread.
 418   _cm_thread = new G1ConcurrentMarkThread(this);
 419   if (_cm_thread->osthread() == NULL) {
 420     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 421   }
 422 
 423   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 424 


 425   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
 426     // Calculate the number of concurrent worker threads by scaling
 427     // the number of parallel GC threads.
 428     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
 429     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 430   }
 431 
 432   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
 433   if (ConcGCThreads > ParallelGCThreads) {
 434     log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
 435                     ConcGCThreads, ParallelGCThreads);
 436     return;
 437   }
 438 
 439   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 440   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 441 
 442   _num_concurrent_workers = ConcGCThreads;
 443   _max_concurrent_workers = _num_concurrent_workers;
 444 


 725 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 726   assert_at_safepoint_on_vm_thread();
 727   clear_bitmap(_prev_mark_bitmap, workers, false);
 728 }
 729 
 730 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 731 public:
 732   bool do_heap_region(HeapRegion* r) {
 733     r->note_start_of_marking();
 734     return false;
 735   }
 736 };
 737 
 738 void G1ConcurrentMark::pre_initial_mark() {
 739   // Initialize marking structures. This has to be done in a STW phase.
 740   reset();
 741 
 742   // For each region note start of marking.
 743   NoteStartOfMarkHRClosure startcl;
 744   _g1h->heap_region_iterate(&startcl);
 745 
 746   _root_regions.reset(_g1h->max_regions());
 747 }
 748 
 749 
 750 void G1ConcurrentMark::post_initial_mark() {
 751   // Start Concurrent Marking weak-reference discovery.
 752   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 753   // enable ("weak") refs discovery
 754   rp->enable_discovery();
 755   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 756 
 757   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
 758   // This is the start of the marking cycle; we expect all
 759   // threads to have SATB queues with active set to false.
 760   satb_mq_set.set_active_all_threads(true, /* new active value */
 761                                      false /* expected_active */);
 762 
 763   _root_regions.prepare_for_scan();
 764 
 765   // update_g1_committed() will be called at the end of an evac pause
 766   // when marking is on. So, it's also called at the end of the


 858   if (!UseDynamicNumberOfGCThreads ||
 859       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 860        !ForceDynamicNumberOfGCThreads)) {
 861     result = _max_concurrent_workers;
 862   } else {
 863     result =
 864       AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
 865                                                       1, /* Minimum workers */
 866                                                       _num_concurrent_workers,
 867                                                       Threads::number_of_non_daemon_threads());
 868     // Don't scale the result down by scale_concurrent_workers() because
 869     // that scaling has already gone into "_max_concurrent_workers".
 870   }
 871   assert(result > 0 && result <= _max_concurrent_workers,
 872          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 873          _max_concurrent_workers, result);
 874   return result;
 875 }
 876 
 877 void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
 878   assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
 879          "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
 880   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 881 
 882   const uintx interval = PrefetchScanIntervalInBytes;
 883   HeapWord* curr = hr->next_top_at_mark_start();
 884   const HeapWord* end = hr->top();
 885   while (curr < end) {
 886     Prefetch::read(curr, interval);
 887     oop obj = oop(curr);
 888     int size = obj->oop_iterate_size(&cl);
 889     assert(size == obj->size(), "sanity");
 890     curr += size;
 891   }
 892 }
 893 
 894 class G1CMRootRegionScanTask : public AbstractGangTask {
 895   G1ConcurrentMark* _cm;
 896 public:
 897   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 898     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
 899 
 900   void work(uint worker_id) {
 901     assert(Thread::current()->is_ConcurrentGC_thread(),
 902            "this should only be done by a conc GC thread");
 903 
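The work() body is cut off at the hunk boundary; presumably each gang worker keeps calling claim_next() and scanning the returned region until it gets NULL, though that loop is not shown on this page. The standalone program below demonstrates just that claiming pattern with std::atomic and std::thread; it is not HotSpot code, and every name in it is a stand-in.

// Standalone demo of the claim loop each scan worker is assumed to run:
// keep taking the next unclaimed index until the shared cursor passes the
// number of filled slots, so every region is scanned exactly once.
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  const size_t num_regions = 16;
  std::vector<int> regions(num_regions);     // stand-ins for HeapRegion*
  for (size_t i = 0; i < num_regions; i++) regions[i] = (int)i;

  std::atomic<size_t> claimed{0};            // plays the _claimed_root_regions cursor
  std::atomic<size_t> scanned{0};

  auto worker = [&](unsigned worker_id) {
    while (true) {
      size_t idx = claimed.fetch_add(1);     // claim_next()
      if (idx >= num_regions) break;         // nothing left to scan
      // "scan" the region; in G1 this would be scan_root_region(hr, worker_id)
      std::printf("worker %u scans region %d\n", worker_id, regions[idx]);
      scanned.fetch_add(1);
    }
  };

  std::thread t0(worker, 0u), t1(worker, 1u);
  t0.join();
  t1.join();
  std::printf("scanned %zu regions exactly once\n", scanned.load());
  return 0;
}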

