717
718 // Repeat the asserts from above.
719 guarantee(cm_thread()->during_cycle(), "invariant");
720 guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
721 }
722
// Clears the bitmap of the previous (completed) marking so its memory can be
// reused for the next cycle. Must run inside a safepoint on the VM thread;
// the actual clearing work is distributed over the given WorkGang.
723 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
724   assert_at_safepoint_on_vm_thread();
725   clear_bitmap(_prev_mark_bitmap, workers, false);
726 }
727
// Heap-region closure applied to every region at the start of marking;
// simply forwards the notification to the region itself.
728 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
729 public:
730   bool do_heap_region(HeapRegion* r) {
731     r->note_start_of_marking();
732     return false;  // false = do not abort; visit all regions (HeapRegionClosure convention).
733   }
734 };
735
// STW setup for a new marking cycle: resets the marking state, notifies every
// heap region that marking starts, and resets the root regions.
736 void G1ConcurrentMark::pre_initial_mark() {
737   // The comment below states this must happen in a STW phase, but nothing
737   // enforced it; assert it explicitly, matching clear_prev_bitmap() above.
737   assert_at_safepoint_on_vm_thread();
738
739   // Initialize marking structures. This has to be done in a STW phase.
740   reset();
741
742   // For each region note start of marking.
743   NoteStartOfMarkHRClosure startcl;
744   _g1h->heap_region_iterate(&startcl);
745
746   _root_regions.reset();
747 }
746
747
748 void G1ConcurrentMark::post_initial_mark() {
749 // Start Concurrent Marking weak-reference discovery.
750 ReferenceProcessor* rp = _g1h->ref_processor_cm();
751 // enable ("weak") refs discovery
752 rp->enable_discovery();
753 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
754
755 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
756 // This is the start of the marking cycle, so we expect all
757 // threads to have SATB queues with active set to false.
1932 VerifyNoCSetOops(const char* phase, int info = -1) :
1933 _g1h(G1CollectedHeap::heap()),
1934 _phase(phase),
1935 _info(info)
1936 { }
1937
1938 void operator()(G1TaskQueueEntry task_entry) const {
1939 if (task_entry.is_array_slice()) {
1940 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1941 return;
1942 }
1943 guarantee(oopDesc::is_oop(task_entry.obj()),
1944 "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1945 p2i(task_entry.obj()), _phase, _info);
1946 guarantee(!_g1h->is_in_cset(task_entry.obj()),
1947 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
1948 p2i(task_entry.obj()), _phase, _info);
1949 }
1950 };
1951
1952 void G1ConcurrentMark::verify_no_cset_oops() {
1953 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1954 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1955 return;
1956 }
1957
1958 // Verify entries on the global mark stack
1959 _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1960
1961 // Verify entries on the task queues
1962 for (uint i = 0; i < _max_num_tasks; ++i) {
1963 G1CMTaskQueue* queue = _task_queues->queue(i);
1964 queue->iterate(VerifyNoCSetOops("Queue", i));
1965 }
1966
1967 // Verify the global finger
1968 HeapWord* global_finger = finger();
1969 if (global_finger != NULL && global_finger < _heap.end()) {
1970 // Since we always iterate over all regions, we might get a NULL HeapRegion
1971 // here.
1972 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
|
717
718 // Repeat the asserts from above.
719 guarantee(cm_thread()->during_cycle(), "invariant");
720 guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
721 }
722
// Clears the bitmap of the previous (completed) marking so its memory can be
// reused for the next cycle. Must run inside a safepoint on the VM thread;
// the actual clearing work is distributed over the given WorkGang.
723 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
724   assert_at_safepoint_on_vm_thread();
725   clear_bitmap(_prev_mark_bitmap, workers, false);
726 }
727
// Heap-region closure applied to every region at the start of marking;
// simply forwards the notification to the region itself.
728 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
729 public:
730   bool do_heap_region(HeapRegion* r) {
731     r->note_start_of_marking();
732     return false;  // false = do not abort; visit all regions (HeapRegionClosure convention).
733   }
734 };
735
// STW setup for a new marking cycle: resets the marking state, notifies every
// heap region that marking starts, and resets the root regions.
736 void G1ConcurrentMark::pre_initial_mark() {
737   assert_at_safepoint_on_vm_thread();
738
739   // Reset marking state.
740   reset();
741
742   // For each region note start of marking.
743   NoteStartOfMarkHRClosure startcl;
744   _g1h->heap_region_iterate(&startcl);
745
746   _root_regions.reset();
747 }
748
749
750 void G1ConcurrentMark::post_initial_mark() {
751 // Start Concurrent Marking weak-reference discovery.
752 ReferenceProcessor* rp = _g1h->ref_processor_cm();
753 // enable ("weak") refs discovery
754 rp->enable_discovery();
755 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
756
757 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
758 // This is the start of the marking cycle, so we expect all
759 // threads to have SATB queues with active set to false.
1934 VerifyNoCSetOops(const char* phase, int info = -1) :
1935 _g1h(G1CollectedHeap::heap()),
1936 _phase(phase),
1937 _info(info)
1938 { }
1939
1940 void operator()(G1TaskQueueEntry task_entry) const {
1941 if (task_entry.is_array_slice()) {
1942 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1943 return;
1944 }
1945 guarantee(oopDesc::is_oop(task_entry.obj()),
1946 "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1947 p2i(task_entry.obj()), _phase, _info);
1948 guarantee(!_g1h->is_in_cset(task_entry.obj()),
1949 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
1950 p2i(task_entry.obj()), _phase, _info);
1951 }
1952 };
1953
1954 void G1ConcurrentMark::verify_no_collection_set_oops_in_stacks() {
1955 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1956 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1957 return;
1958 }
1959
1960 // Verify entries on the global mark stack
1961 _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1962
1963 // Verify entries on the task queues
1964 for (uint i = 0; i < _max_num_tasks; ++i) {
1965 G1CMTaskQueue* queue = _task_queues->queue(i);
1966 queue->iterate(VerifyNoCSetOops("Queue", i));
1967 }
1968
1969 // Verify the global finger
1970 HeapWord* global_finger = finger();
1971 if (global_finger != NULL && global_finger < _heap.end()) {
1972 // Since we always iterate over all regions, we might get a NULL HeapRegion
1973 // here.
1974 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
|