
src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

rev 51816 : imported patch 8210557-more-logging


 699   guarantee(cm_thread()->during_cycle(), "invariant");
 700 
 701   // We are finishing up the current cycle by clearing the next
 702   // marking bitmap and getting it ready for the next cycle. During
 703   // this time no other cycle can start. So, let's make sure that this
 704   // is the case.
 705   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 706 
 707   clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 708 
 709   // Repeat the asserts from above.
 710   guarantee(cm_thread()->during_cycle(), "invariant");
 711   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 712 }
 713 
 714 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 715   assert_at_safepoint_on_vm_thread();
 716   clear_bitmap(_prev_mark_bitmap, workers, false);
 717 }
 718 
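A minimal, self-contained sketch of the chunked-clear pattern, assuming the boolean passed to clear_bitmap() above distinguishes a clear that may pause between chunks (the concurrent next-bitmap case) from one that runs entirely inside a safepoint (the prev-bitmap case). Bitmap, should_yield() and yield_to_safepoint() are illustrative stand-ins, not HotSpot APIs:

  #include <cstddef>
  #include <vector>

  struct Bitmap {
    std::vector<bool> bits;
    void clear_range(std::size_t begin, std::size_t end) {
      for (std::size_t i = begin; i < end; ++i) bits[i] = false;
    }
    std::size_t size() const { return bits.size(); }
  };

  // Stand-ins: a pending-safepoint check and a "park until it is over" call.
  static bool should_yield() { return false; }
  static void yield_to_safepoint() { /* wait for the safepoint to complete */ }

  // Clears the bitmap in fixed-size chunks; if may_yield is true the loop
  // checks for a pending safepoint after every chunk (assumption: this is
  // what separates the concurrent clear from the safepoint clear above).
  void clear_bitmap(Bitmap& bm, bool may_yield, std::size_t chunk = 1024) {
    for (std::size_t begin = 0; begin < bm.size(); begin += chunk) {
      std::size_t end = begin + chunk < bm.size() ? begin + chunk : bm.size();
      bm.clear_range(begin, end);
      if (may_yield && should_yield()) {
        yield_to_safepoint();  // only the concurrent caller ever takes this path
      }
    }
  }
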
 719 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 720   G1CMBitMap* _bitmap;
 721  public:
 722   CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
 723   }
 724 
 725   virtual bool do_heap_region(HeapRegion* r) {
 726     // This closure can be called concurrently to the mutator, so we must make sure
 727     // that the result of the getNextMarkedWordAddress() call is compared to the
 728     // value passed to it as limit to detect any found bits.
 729     // end never changes in G1.
 730     HeapWord* end = r->end();
 731     return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
 732   }
 733 };
 734 
 735 bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
 736   CheckBitmapClearHRClosure cl(_next_mark_bitmap);
 737   _g1h->heap_region_iterate(&cl);
 738   return cl.is_complete();
 739 }
 740 
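A simplified, self-contained sketch of the closure-driven iteration that CheckBitmapClearHRClosure and next_mark_bitmap_is_clear() above rely on: the iterator visits regions until a closure returns true, and is_complete() afterwards tells the caller whether every region was visited. Region, RegionClosure and region_iterate() are illustrative stand-ins for the HeapRegion machinery, not the real classes:

  #include <vector>

  struct Region { bool has_marked_bits; };

  class RegionClosure {
    bool _complete = true;
   public:
    virtual ~RegionClosure() {}
    virtual bool do_region(const Region& r) = 0;  // return true to stop iterating
    void set_incomplete()    { _complete = false; }
    bool is_complete() const { return _complete; }
  };

  void region_iterate(const std::vector<Region>& regions, RegionClosure* cl) {
    for (const Region& r : regions) {
      if (cl->do_region(r)) {  // the closure asked to terminate early
        cl->set_incomplete();
        return;
      }
    }
  }

  // Mirrors CheckBitmapClearHRClosure: report "found a mark" by returning true,
  // so a fully clear bitmap leaves the closure complete.
  class CheckClearClosure : public RegionClosure {
   public:
    virtual bool do_region(const Region& r) { return r.has_marked_bits; }
  };

  bool bitmap_is_clear(const std::vector<Region>& regions) {
    CheckClearClosure cl;
    region_iterate(regions, &cl);
    return cl.is_complete();  // complete iteration means no marked bit was found
  }

Under the same contract, NoteStartOfMarkHRClosure below returning false simply means "keep iterating", so every region gets note_start_of_marking() called on it.
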
 741 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 742 public:
 743   bool do_heap_region(HeapRegion* r) {
 744     r->note_start_of_marking();
 745     return false;
 746   }
 747 };
 748 
 749 void G1ConcurrentMark::pre_initial_mark() {
 750   // Initialize marking structures. This has to be done in a STW phase.
 751   reset();
 752 
 753   // For each region note start of marking.
 754   NoteStartOfMarkHRClosure startcl;
 755   _g1h->heap_region_iterate(&startcl);
 756 }
 757 
 758 
 759 void G1ConcurrentMark::post_initial_mark() {
 760   // Start Concurrent Marking weak-reference discovery.




 699   guarantee(cm_thread()->during_cycle(), "invariant");
 700 
 701   // We are finishing up the current cycle by clearing the next
 702   // marking bitmap and getting it ready for the next cycle. During
 703   // this time no other cycle can start. So, let's make sure that this
 704   // is the case.
 705   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 706 
 707   clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 708 
 709   // Repeat the asserts from above.
 710   guarantee(cm_thread()->during_cycle(), "invariant");
 711   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 712 }
 713 
 714 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 715   assert_at_safepoint_on_vm_thread();
 716   clear_bitmap(_prev_mark_bitmap, workers, false);
 717 }
 718 
 719 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 720 public:
 721   bool do_heap_region(HeapRegion* r) {
 722     r->note_start_of_marking();
 723     return false;
 724   }
 725 };
 726 
 727 void G1ConcurrentMark::pre_initial_mark() {
 728   // Initialize marking structures. This has to be done in a STW phase.
 729   reset();
 730 
 731   // For each region note start of marking.
 732   NoteStartOfMarkHRClosure startcl;
 733   _g1h->heap_region_iterate(&startcl);
 734 }
 735 
 736 
 737 void G1ConcurrentMark::post_initial_mark() {
 738   // Start Concurrent Marking weak-reference discovery.

