< prev index next >

src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

Print this page
rev 60059 : imported patch 8210462-fix-remaining-mentions-of-im


 666   clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 667 
 668   // Repeat the asserts from above.
 669   guarantee(cm_thread()->during_cycle(), "invariant");
 670   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 671 }
 672 
     // Clear the bitmap of the previous (completed) marking cycle using the
     // given worker gang. Must be called at a safepoint on the VM thread.
 673 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 674   assert_at_safepoint_on_vm_thread();
       // Last argument is false here but true in the _next_mark_bitmap
       // clearing above; presumably it permits yielding, which is not
       // possible inside a safepoint -- confirm against clear_bitmap().
 675   clear_bitmap(_prev_mark_bitmap, workers, false);
 676 }
 677 
     // Heap region closure that notifies each region that marking is about
     // to start by calling note_start_of_marking() on it.
 678 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 679 public:
 680   bool do_heap_region(HeapRegion* r) {
 681     r->note_start_of_marking();
         // false means "do not terminate the iteration early" (per the
         // HeapRegionClosure convention -- confirm in its declaration).
 682     return false;
 683   }
 684 };
 685 
     // First part of the initial-mark pause work (runs at a safepoint on the
     // VM thread): resets the marking state, tells every region that marking
     // is starting, and resets the root region set for this cycle.
 686 void G1ConcurrentMark::pre_initial_mark() {
 687   assert_at_safepoint_on_vm_thread();
 688 
 689   // Reset marking state.
 690   reset();
 691 
 692   // For each region note start of marking.
 693   NoteStartOfMarkHRClosure startcl;
 694   _g1h->heap_region_iterate(&startcl);
 695 
       // Reset the root regions ahead of the upcoming root region scan.
 696   _root_regions.reset();
 697 }
 698 
 699 
     // Second part of the initial-mark pause work: enables concurrent
     // ("weak") reference discovery, activates the SATB queues of all
     // threads, and prepares the root regions for the concurrent scan.
 700 void G1ConcurrentMark::post_initial_mark() {
 701   // Start Concurrent Marking weak-reference discovery.
 702   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 703   // enable ("weak") refs discovery
 704   rp->enable_discovery();
 705   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 706 
 707   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
 708   // This is the start of the marking cycle; we expect all
 709   // threads to have SATB queues with active set to false.
 710   satb_mq_set.set_active_all_threads(true, /* new active value */
 711                                      false /* expected_active */);
 712 
 713   _root_regions.prepare_for_scan();
 714 
 715   // update_g1_committed() will be called at the end of an evac pause
 716   // when marking is on. So, it's also called at the end of the
 717   // initial-mark pause to update the heap end, if the heap expands
 718   // during it. No need to call it here.
 719 }
 720 
 721 /*
 722  * Notice that in the next two methods, we actually leave the STS
 723  * during the barrier sync and join it immediately afterwards. If we
 724  * do not do this, the following deadlock can occur: one thread could
 725  * be in the barrier sync code, waiting for the other thread to also
 726  * sync up, whereas another one could be trying to yield, while also
 727  * waiting for the other threads to sync up too.
 728  *
 729  * Note, however, that this code is also used during remark and in
 730  * this case we should not attempt to leave / enter the STS, otherwise
 731  * we'll either hit an assert (debug / fastdebug) or deadlock
 732  * (product). So we should only leave / enter the STS if we are
 733  * operating concurrently.
 734  *
 735  * Because the thread that does the sync barrier has left the STS, it
 736  * is possible that it is suspended for a Full GC or that an evacuation
 737  * pause occurs. This is actually safe, since entering the sync


2394 }
2395 
2396 /*****************************************************************************
2397 
2398     The do_marking_step(time_target_ms, ...) method is the building
2399     block of the parallel marking framework. It can be called in parallel
2400     with other invocations of do_marking_step() on different tasks
2401     (but only one per task, obviously) and concurrently with the
2402     mutator threads, or during remark, hence it eliminates the need
2403     for two versions of the code. When called during remark, it will
2404     pick up from where the task left off during the concurrent marking
2405     phase. Interestingly, tasks are also claimable during evacuation
 2406     pauses, since do_marking_step() ensures that it aborts before
2407     it needs to yield.
2408 
2409     The data structures that it uses to do marking work are the
2410     following:
2411 
2412       (1) Marking Bitmap. If there are gray objects that appear only
2413       on the bitmap (this happens either when dealing with an overflow
2414       or when the initial marking phase has simply marked the roots
2415       and didn't push them on the stack), then tasks claim heap
2416       regions whose bitmap they then scan to find gray objects. A
2417       global finger indicates where the end of the last claimed region
2418       is. A local finger indicates how far into the region a task has
2419       scanned. The two fingers are used to determine how to gray an
2420       object (i.e. whether simply marking it is OK, as it will be
2421       visited by a task in the future, or whether it needs to be also
2422       pushed on a stack).
2423 
2424       (2) Local Queue. The local queue of the task which is accessed
2425       reasonably efficiently by the task. Other tasks can steal from
2426       it when they run out of work. Throughout the marking phase, a
2427       task attempts to keep its local queue short but not totally
2428       empty, so that entries are available for stealing by other
2429       tasks. Only when there is no more work, a task will totally
2430       drain its local queue.
2431 
2432       (3) Global Mark Stack. This handles local queue overflow. During
2433       marking only sets of entries are moved between it and the local
2434       queues, as access to it requires a mutex and more fine-grain




 666   clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 667 
 668   // Repeat the asserts from above.
 669   guarantee(cm_thread()->during_cycle(), "invariant");
 670   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 671 }
 672 
     // Clear the bitmap of the previous (completed) marking cycle using the
     // given worker gang. Must be called at a safepoint on the VM thread.
 673 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 674   assert_at_safepoint_on_vm_thread();
       // Last argument is false here but true in the _next_mark_bitmap
       // clearing above; presumably it permits yielding, which is not
       // possible inside a safepoint -- confirm against clear_bitmap().
 675   clear_bitmap(_prev_mark_bitmap, workers, false);
 676 }
 677 
     // Heap region closure that notifies each region that marking is about
     // to start by calling note_start_of_marking() on it.
 678 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 679 public:
 680   bool do_heap_region(HeapRegion* r) {
 681     r->note_start_of_marking();
         // false means "do not terminate the iteration early" (per the
         // HeapRegionClosure convention -- confirm in its declaration).
 682     return false;
 683   }
 684 };
 685 
     // First part of the concurrent start pause work (runs at a safepoint on
     // the VM thread): resets the marking state, tells every region that
     // marking is starting, and resets the root region set for this cycle.
 686 void G1ConcurrentMark::pre_concurrent_start() {
 687   assert_at_safepoint_on_vm_thread();
 688 
 689   // Reset marking state.
 690   reset();
 691 
 692   // For each region note start of marking.
 693   NoteStartOfMarkHRClosure startcl;
 694   _g1h->heap_region_iterate(&startcl);
 695 
       // Reset the root regions ahead of the upcoming root region scan.
 696   _root_regions.reset();
 697 }
 698 
 699 
     // Second part of the concurrent start pause work: enables concurrent
     // ("weak") reference discovery, activates the SATB queues of all
     // threads, and prepares the root regions for the concurrent scan.
 700 void G1ConcurrentMark::post_concurrent_start() {
 701   // Start Concurrent Marking weak-reference discovery.
 702   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 703   // enable ("weak") refs discovery
 704   rp->enable_discovery();
 705   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 706 
 707   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
 708   // This is the start of the marking cycle; we expect all
 709   // threads to have SATB queues with active set to false.
 710   satb_mq_set.set_active_all_threads(true, /* new active value */
 711                                      false /* expected_active */);
 712 
 713   _root_regions.prepare_for_scan();
 714 
 715   // update_g1_committed() will be called at the end of an evac pause
 716   // when marking is on. So, it's also called at the end of the
 717   // concurrent start pause to update the heap end, if the heap expands
 718   // during it. No need to call it here.
 719 }
 720 
 721 /*
 722  * Notice that in the next two methods, we actually leave the STS
 723  * during the barrier sync and join it immediately afterwards. If we
 724  * do not do this, the following deadlock can occur: one thread could
 725  * be in the barrier sync code, waiting for the other thread to also
 726  * sync up, whereas another one could be trying to yield, while also
 727  * waiting for the other threads to sync up too.
 728  *
 729  * Note, however, that this code is also used during remark and in
 730  * this case we should not attempt to leave / enter the STS, otherwise
 731  * we'll either hit an assert (debug / fastdebug) or deadlock
 732  * (product). So we should only leave / enter the STS if we are
 733  * operating concurrently.
 734  *
 735  * Because the thread that does the sync barrier has left the STS, it
 736  * is possible that it is suspended for a Full GC or that an evacuation
 737  * pause occurs. This is actually safe, since entering the sync


2394 }
2395 
2396 /*****************************************************************************
2397 
2398     The do_marking_step(time_target_ms, ...) method is the building
2399     block of the parallel marking framework. It can be called in parallel
2400     with other invocations of do_marking_step() on different tasks
2401     (but only one per task, obviously) and concurrently with the
2402     mutator threads, or during remark, hence it eliminates the need
2403     for two versions of the code. When called during remark, it will
2404     pick up from where the task left off during the concurrent marking
2405     phase. Interestingly, tasks are also claimable during evacuation
 2406     pauses, since do_marking_step() ensures that it aborts before
2407     it needs to yield.
2408 
2409     The data structures that it uses to do marking work are the
2410     following:
2411 
2412       (1) Marking Bitmap. If there are gray objects that appear only
2413       on the bitmap (this happens either when dealing with an overflow
2414       or when the concurrent start pause has simply marked the roots
2415       and didn't push them on the stack), then tasks claim heap
2416       regions whose bitmap they then scan to find gray objects. A
2417       global finger indicates where the end of the last claimed region
2418       is. A local finger indicates how far into the region a task has
2419       scanned. The two fingers are used to determine how to gray an
2420       object (i.e. whether simply marking it is OK, as it will be
2421       visited by a task in the future, or whether it needs to be also
2422       pushed on a stack).
2423 
2424       (2) Local Queue. The local queue of the task which is accessed
2425       reasonably efficiently by the task. Other tasks can steal from
2426       it when they run out of work. Throughout the marking phase, a
2427       task attempts to keep its local queue short but not totally
2428       empty, so that entries are available for stealing by other
2429       tasks. Only when there is no more work, a task will totally
2430       drain its local queue.
2431 
2432       (3) Global Mark Stack. This handles local queue overflow. During
2433       marking only sets of entries are moved between it and the local
2434       queues, as access to it requires a mutex and more fine-grain


< prev index next >