911 }
912
913 void G1RemSet::prepare_for_scan_heap_roots() {
914 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
915 dcqs.concatenate_logs();
916
917 _scan_state->prepare();
918 }
919
920 class G1MergeHeapRootsTask : public AbstractGangTask {
921
922 // Visitor for remembered sets, dropping entries onto the card table.
923 class G1MergeCardSetClosure : public HeapRegionClosure {
924 G1RemSetScanState* _scan_state;
925 G1CardTable* _ct;
926
927 uint _merged_sparse;
928 uint _merged_fine;
929 uint _merged_coarse;
930
931 // Returns if the region contains cards we need to scan. If so, remember that
932 // region in the current set of dirty regions.
933 bool remember_if_interesting(uint const region_idx) {
934 if (!_scan_state->contains_cards_to_process(region_idx)) {
935 return false;
936 }
937 _scan_state->add_dirty_region(region_idx);
938 return true;
939 }
940 public:
941 G1MergeCardSetClosure(G1RemSetScanState* scan_state) :
942 _scan_state(scan_state),
943 _ct(G1CollectedHeap::heap()->card_table()),
944 _merged_sparse(0),
945 _merged_fine(0),
946 _merged_coarse(0) { }
947
948 void next_coarse_prt(uint const region_idx) {
949 if (!remember_if_interesting(region_idx)) {
950 return;
951 }
952
953 _merged_coarse++;
954
955 size_t region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
956 _ct->mark_region_dirty(region_base_idx, HeapRegion::CardsPerRegion);
957 _scan_state->set_chunk_region_dirty(region_base_idx);
958 }
959
960 void next_fine_prt(uint const region_idx, BitMap* bm) {
961 if (!remember_if_interesting(region_idx)) {
962 return;
963 }
964
965 _merged_fine++;
966
967 size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
968 BitMap::idx_t cur = bm->get_next_one_offset(0);
969 while (cur != bm->size()) {
970 _ct->mark_clean_as_dirty(region_base_idx + cur);
971 _scan_state->set_chunk_dirty(region_base_idx + cur);
972 cur = bm->get_next_one_offset(cur + 1);
973 }
974 }
975
976 void next_sparse_prt(uint const region_idx, SparsePRTEntry::card_elem_t* cards, uint const num_cards) {
977 if (!remember_if_interesting(region_idx)) {
978 return;
979 }
980
981 _merged_sparse++;
982
983 size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
984 for (uint i = 0; i < num_cards; i++) {
985 size_t card_idx = region_base_idx + cards[i];
986 _ct->mark_clean_as_dirty(card_idx);
987 _scan_state->set_chunk_dirty(card_idx);
988 }
989 }
990
991 virtual bool do_heap_region(HeapRegion* r) {
992 assert(r->in_collection_set() || r->is_starts_humongous(), "must be");
993
994 HeapRegionRemSet* rem_set = r->rem_set();
995 if (!rem_set->is_empty()) {
996 rem_set->iterate_prts(*this);
997 }
998
999 return false;
1000 }
1001
1002 size_t merged_sparse() const { return _merged_sparse; }
1003 size_t merged_fine() const { return _merged_fine; }
1004 size_t merged_coarse() const { return _merged_coarse; }
1005 };
1006
1007 // Visitor for the remembered sets of humongous candidate regions to merge their
1008 // remembered set into the card table.
// NOTE(review): this excerpt is missing original lines 1025-1029; the guarantee()
// below is truncated mid-argument-list and the code that presumably applies _cl
// to the region sits in the missing span — confirm against the full source.
1009 class G1FlushHumongousCandidateRemSets : public HeapRegionClosure {
// Underlying closure that drops remembered set entries onto the card table.
1010 G1MergeCardSetClosure _cl;
1011
1012 public:
1013 G1FlushHumongousCandidateRemSets(G1RemSetScanState* scan_state) : _cl(scan_state) { }
1014
// Processes one heap region: skips everything except regions that start a
// humongous object, are still flagged humongous in the region attribute table,
// and have a non-empty remembered set. For those, the remembered set is cleared
// (card set only) and its state set back to complete so entries keep being
// collected for candidates that end up not being reclaimed.
1015 virtual bool do_heap_region(HeapRegion* r) {
1016 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1017
1018 if (!r->is_starts_humongous() ||
1019 !g1h->region_attr(r->hrm_index()).is_humongous() ||
1020 r->rem_set()->is_empty()) {
1021 return false;
1022 }
1023
// Candidates are expected to only ever have a sparse remembered set.
1024 guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
// (original lines 1025-1029 missing from this excerpt)
1030 // implicitly rebuild anything else during eager reclaim. Note that at the moment
1031 // (and probably never) we do not enter this path if there are other kind of
1032 // remembered sets for this region.
1033 r->rem_set()->clear_locked(true /* only_cardset */);
1034 // Clear_locked() above sets the state to Empty. However we want to continue
1035 // collecting remembered set entries for humongous regions that were not
1036 // reclaimed.
1037 r->rem_set()->set_state_complete();
1038 #ifdef ASSERT
1039 G1HeapRegionAttr region_attr = g1h->region_attr(r->hrm_index());
1040 assert(region_attr.needs_remset_update(), "must be");
1041 #endif
1042 assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.")
1043
// Always return false so heap iteration continues.
1044 return false;
1045 }
1046
// Merge statistics, forwarded from the internal merge closure.
1047 size_t merged_sparse() const { return _cl.merged_sparse(); }
1048 size_t merged_fine() const { return _cl.merged_fine(); }
1049 size_t merged_coarse() const { return _cl.merged_coarse(); }
1050 };
1051
1052 // Visitor for the log buffer entries to merge them into the card table.
1053 class G1MergeLogBufferCardsClosure : public G1CardTableEntryClosure {
1054 G1RemSetScanState* _scan_state;
1055 G1CardTable* _ct;
1056
1057 size_t _cards_dirty;
1058 size_t _cards_skipped;
1059 public:
1060 G1MergeLogBufferCardsClosure(G1CollectedHeap* g1h, G1RemSetScanState* scan_state) :
1061 _scan_state(scan_state), _ct(g1h->card_table()), _cards_dirty(0), _cards_skipped(0)
1062 {}
1063
1064 void do_card_ptr(CardValue* card_ptr, uint worker_id) {
1065 // The only time we care about recording cards that
1066 // contain references that point into the collection set
1067 // is during RSet updating within an evacuation pause.
1068 // In this case worker_id should be the id of a GC worker thread.
1069 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
1131
1132 G1GCPhaseTimes::GCParPhases merge_remset_phase = _initial_evacuation ?
1133 G1GCPhaseTimes::MergeRS :
1134 G1GCPhaseTimes::OptMergeRS;
1135
1136 // We schedule flushing the remembered sets of humongous fast reclaim candidates
1137 // onto the card table first to allow the remaining parallelized tasks hide it.
1138 if (_initial_evacuation &&
1139 p->fast_reclaim_humongous_candidates() > 0 &&
1140 !_fast_reclaim_handled &&
1141 !Atomic::cmpxchg(&_fast_reclaim_handled, false, true)) {
1142
1143 G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeER, worker_id);
1144
1145 G1FlushHumongousCandidateRemSets cl(_scan_state);
1146 g1h->heap_region_iterate(&cl);
1147
1148 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
1149 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
1150 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
1151 }
1152
1153 // Merge remembered sets of current candidates.
1154 {
1155 G1GCParPhaseTimesTracker x(p, merge_remset_phase, worker_id, _initial_evacuation /* must_record */);
1156 G1MergeCardSetClosure cl(_scan_state);
1157 g1h->collection_set_iterate_increment_from(&cl, &_hr_claimer, worker_id);
1158
1159 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
1160 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
1161 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
1162 }
1163
1164 // Apply closure to log entries in the HCC.
1165 if (_initial_evacuation && G1HotCardCache::default_use_cache()) {
1166 assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
1167 G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeHCC, worker_id);
1168 G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
1169 g1h->iterate_hcc_closure(&cl, worker_id);
1170
1171 p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeHCCDirtyCards);
1172 p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeHCCSkippedCards);
1173 }
1174
1175 // Now apply the closure to all remaining log entries.
1176 if (_initial_evacuation) {
1177 assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
1178 G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeLB, worker_id);
1179
1180 G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
1181 apply_closure_to_dirty_card_buffers(&cl, worker_id);
|
911 }
912
913 void G1RemSet::prepare_for_scan_heap_roots() {
914 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
915 dcqs.concatenate_logs();
916
917 _scan_state->prepare();
918 }
919
920 class G1MergeHeapRootsTask : public AbstractGangTask {
921
922 // Visitor for remembered sets, dropping entries onto the card table.
923 class G1MergeCardSetClosure : public HeapRegionClosure {
924 G1RemSetScanState* _scan_state;
925 G1CardTable* _ct;
926
927 uint _merged_sparse;
928 uint _merged_fine;
929 uint _merged_coarse;
930
931 size_t _cards_dirty;
932
933 // Returns if the region contains cards we need to scan. If so, remember that
934 // region in the current set of dirty regions.
935 bool remember_if_interesting(uint const region_idx) {
936 if (!_scan_state->contains_cards_to_process(region_idx)) {
937 return false;
938 }
939 _scan_state->add_dirty_region(region_idx);
940 return true;
941 }
942 public:
943 G1MergeCardSetClosure(G1RemSetScanState* scan_state) :
944 _scan_state(scan_state),
945 _ct(G1CollectedHeap::heap()->card_table()),
946 _merged_sparse(0),
947 _merged_fine(0),
948 _merged_coarse(0),
949 _cards_dirty(0) { }
950
951 void next_coarse_prt(uint const region_idx) {
952 if (!remember_if_interesting(region_idx)) {
953 return;
954 }
955
956 _merged_coarse++;
957
958 size_t region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
959 _cards_dirty += _ct->mark_region_dirty(region_base_idx, HeapRegion::CardsPerRegion);
960 _scan_state->set_chunk_region_dirty(region_base_idx);
961 }
962
963 void next_fine_prt(uint const region_idx, BitMap* bm) {
964 if (!remember_if_interesting(region_idx)) {
965 return;
966 }
967
968 _merged_fine++;
969
970 size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
971 BitMap::idx_t cur = bm->get_next_one_offset(0);
972 while (cur != bm->size()) {
973 _cards_dirty += _ct->mark_clean_as_dirty(region_base_idx + cur);
974 _scan_state->set_chunk_dirty(region_base_idx + cur);
975 cur = bm->get_next_one_offset(cur + 1);
976 }
977 }
978
979 void next_sparse_prt(uint const region_idx, SparsePRTEntry::card_elem_t* cards, uint const num_cards) {
980 if (!remember_if_interesting(region_idx)) {
981 return;
982 }
983
984 _merged_sparse++;
985
986 size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
987 for (uint i = 0; i < num_cards; i++) {
988 size_t card_idx = region_base_idx + cards[i];
989 _cards_dirty += _ct->mark_clean_as_dirty(card_idx);
990 _scan_state->set_chunk_dirty(card_idx);
991 }
992 }
993
994 virtual bool do_heap_region(HeapRegion* r) {
995 assert(r->in_collection_set() || r->is_starts_humongous(), "must be");
996
997 HeapRegionRemSet* rem_set = r->rem_set();
998 if (!rem_set->is_empty()) {
999 rem_set->iterate_prts(*this);
1000 }
1001
1002 return false;
1003 }
1004
1005 size_t merged_sparse() const { return _merged_sparse; }
1006 size_t merged_fine() const { return _merged_fine; }
1007 size_t merged_coarse() const { return _merged_coarse; }
1008
1009 size_t cards_dirty() const { return _cards_dirty; }
1010 };
1011
1012 // Visitor for the remembered sets of humongous candidate regions to merge their
1013 // remembered set into the card table.
// NOTE(review): this excerpt is missing original lines 1030-1034; the guarantee()
// below is truncated mid-argument-list and the code that presumably applies _cl
// to the region sits in the missing span — confirm against the full source.
1014 class G1FlushHumongousCandidateRemSets : public HeapRegionClosure {
// Underlying closure that drops remembered set entries onto the card table.
1015 G1MergeCardSetClosure _cl;
1016
1017 public:
1018 G1FlushHumongousCandidateRemSets(G1RemSetScanState* scan_state) : _cl(scan_state) { }
1019
// Processes one heap region: skips everything except regions that start a
// humongous object, are still flagged humongous in the region attribute table,
// and have a non-empty remembered set. For those, the remembered set is cleared
// (card set only) and its state set back to complete so entries keep being
// collected for candidates that end up not being reclaimed.
1020 virtual bool do_heap_region(HeapRegion* r) {
1021 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1022
1023 if (!r->is_starts_humongous() ||
1024 !g1h->region_attr(r->hrm_index()).is_humongous() ||
1025 r->rem_set()->is_empty()) {
1026 return false;
1027 }
1028
// Candidates are expected to only ever have a sparse remembered set.
1029 guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
// (original lines 1030-1034 missing from this excerpt)
1035 // implicitly rebuild anything else during eager reclaim. Note that at the moment
1036 // (and probably never) we do not enter this path if there are other kind of
1037 // remembered sets for this region.
1038 r->rem_set()->clear_locked(true /* only_cardset */);
1039 // Clear_locked() above sets the state to Empty. However we want to continue
1040 // collecting remembered set entries for humongous regions that were not
1041 // reclaimed.
1042 r->rem_set()->set_state_complete();
1043 #ifdef ASSERT
1044 G1HeapRegionAttr region_attr = g1h->region_attr(r->hrm_index());
1045 assert(region_attr.needs_remset_update(), "must be");
1046 #endif
1047 assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
1048
// Always return false so heap iteration continues.
1049 return false;
1050 }
1051
// Merge statistics, forwarded from the internal merge closure.
1052 size_t merged_sparse() const { return _cl.merged_sparse(); }
1053 size_t merged_fine() const { return _cl.merged_fine(); }
1054 size_t merged_coarse() const { return _cl.merged_coarse(); }
1055
// Number of cards the internal closure newly marked dirty.
1056 size_t cards_dirty() const { return _cl.cards_dirty(); }
1057 };
1058
1059 // Visitor for the log buffer entries to merge them into the card table.
1060 class G1MergeLogBufferCardsClosure : public G1CardTableEntryClosure {
1061 G1RemSetScanState* _scan_state;
1062 G1CardTable* _ct;
1063
1064 size_t _cards_dirty;
1065 size_t _cards_skipped;
1066 public:
1067 G1MergeLogBufferCardsClosure(G1CollectedHeap* g1h, G1RemSetScanState* scan_state) :
1068 _scan_state(scan_state), _ct(g1h->card_table()), _cards_dirty(0), _cards_skipped(0)
1069 {}
1070
1071 void do_card_ptr(CardValue* card_ptr, uint worker_id) {
1072 // The only time we care about recording cards that
1073 // contain references that point into the collection set
1074 // is during RSet updating within an evacuation pause.
1075 // In this case worker_id should be the id of a GC worker thread.
1076 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
1138
1139 G1GCPhaseTimes::GCParPhases merge_remset_phase = _initial_evacuation ?
1140 G1GCPhaseTimes::MergeRS :
1141 G1GCPhaseTimes::OptMergeRS;
1142
1143 // We schedule flushing the remembered sets of humongous fast reclaim candidates
1144 // onto the card table first to allow the remaining parallelized tasks hide it.
1145 if (_initial_evacuation &&
1146 p->fast_reclaim_humongous_candidates() > 0 &&
1147 !_fast_reclaim_handled &&
1148 !Atomic::cmpxchg(&_fast_reclaim_handled, false, true)) {
1149
1150 G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeER, worker_id);
1151
1152 G1FlushHumongousCandidateRemSets cl(_scan_state);
1153 g1h->heap_region_iterate(&cl);
1154
1155 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
1156 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
1157 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
1158 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeRSDirtyCards);
1159 }
1160
1161 // Merge remembered sets of current candidates.
1162 {
1163 G1GCParPhaseTimesTracker x(p, merge_remset_phase, worker_id, _initial_evacuation /* must_record */);
1164 G1MergeCardSetClosure cl(_scan_state);
1165 g1h->collection_set_iterate_increment_from(&cl, &_hr_claimer, worker_id);
1166
1167 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
1168 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
1169 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
1170 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeRSDirtyCards);
1171 }
1172
1173 // Apply closure to log entries in the HCC.
1174 if (_initial_evacuation && G1HotCardCache::default_use_cache()) {
1175 assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
1176 G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeHCC, worker_id);
1177 G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
1178 g1h->iterate_hcc_closure(&cl, worker_id);
1179
1180 p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeHCCDirtyCards);
1181 p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeHCCSkippedCards);
1182 }
1183
1184 // Now apply the closure to all remaining log entries.
1185 if (_initial_evacuation) {
1186 assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
1187 G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeLB, worker_id);
1188
1189 G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
1190 apply_closure_to_dirty_card_buffers(&cl, worker_id);
|