  }

  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

#if HRRS_VERBOSE
    gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
                           from, *from);
#endif

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation.  This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    if (loc_hr->is_in_reserved(from)) {
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card =
        (CardIdx_t)(hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize));
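      // The shift converts the HeapWord offset into a card index within
      // the region: with the usual 512-byte cards (card_shift == 9) and
      // 8-byte HeapWords (LogHeapWordSize == 3) this is hw_offset >> 6,
      // i.e. 64 words per card.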

      assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }

public:

  HeapRegion* hr() const { return _hr; }

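  // With PRT_COUNT_OCCUPIED, each PRT caches its bit count in _occupied,
  // trading a little bookkeeping on every add for O(1) occupancy queries;
  // without it, occupied() recounts the bitmap on each call.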
#if PRT_COUNT_OCCUPIED
  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }
#else
  jint occupied() const {
    return _bm.count_one_bits();
  }
#endif

// ...

#if HRRS_VERBOSE
    gclog_or_tty->print_cr(" coarse map hit.");
#endif
    assert(contains_reference(from), "We just added it!");
    return;
  }

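  // The remembered set tracks "from" cards at three granularities: a
  // sparse per-region list of cards, fine-grain per-region bitmaps
  // (PRTs), and a coarse bitmap with one bit per source region.  Adds
  // start at the cheapest level that still has room and escalate as a
  // level fills up.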
  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
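  // The fine-grain table has a power-of-two size, so masking with
  // _mod_max_fine_entries_mask (size - 1) is a cheap modulus selecting
  // the hash bucket for this source region.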
  PosParPRT* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {
      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom()) >> CardTableModRefBS::card_shift;
      CardIdx_t card_index = (CardIdx_t)(from_card - from_hr_bot_card_index);
      assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
             "Must be in range.");
      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrs_ind, card_index)) {
        if (G1RecordHRRSOops) {
          HeapRegionRemSet::record(hr(), from);
#if HRRS_VERBOSE
          gclog_or_tty->print(" Added card " PTR_FORMAT " to region "
                              "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                              align_size_down(uintptr_t(from),
                                              CardTableModRefBS::card_size),
                              hr()->bottom(), from);
#endif
        }
#if HRRS_VERBOSE
        gclog_or_tty->print_cr(" added card to sparse table.");
#endif
        assert(contains_reference_locked(from), "We just added it!");
        return;
      } else {
#if HRRS_VERBOSE

// ...

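// Read-side counterpart of the add path: a card may live at any of the
// three levels, so check the coarse map, then the fine-grain PRT, then
// the sparse table.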
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing_raw(from);
  if (hr == NULL) return false;
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PosParPRT* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                     hr);
  if (prt != NULL) {
    return prt->contains_reference(from);
  } else {
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = (CardIdx_t)(from_card - hr_bot_card_index);
    assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}

void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}

// Determines how many threads can add records to an rset in parallel.
// Records are added either by mutator threads together with the
// concurrent refinement threads, or by the GC threads.
int HeapRegionRemSet::num_par_rem_sets() {
  return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(),
                   ParallelGCThreads);
}
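// Illustrative arithmetic (hypothetical counts): if num_par_ids() plus
// thread_num() came to 10 while ParallelGCThreads is 4, the answer is 10,
// since the rsets must be sized for the larger writer population.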

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
// ...
}

//-------------------- Iteration --------------------

HeapRegionRemSetIterator::HeapRegionRemSetIterator() :
  _hrrs(NULL),
  _g1h(G1CollectedHeap::heap()),
  _bosa(NULL),
  _sparse_iter() { }

void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
  _hrrs = hrrs;
  _coarse_map = &_hrrs->_other_regions._coarse_map;
  _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions;
  _bosa = _hrrs->bosa();

  _is = Sparse;
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index = -1;
  _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);

  _cur_region_cur_card = 0;

  _fine_array_index = -1;
  _fine_cur_prt = NULL;

  _n_yielded_coarse = 0;
  _n_yielded_fine = 0;
  _n_yielded_sparse = 0;

  _sparse_iter.init(&hrrs->_other_regions._sparse_table);
}
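
// The iterator drains the three levels in a fixed order: sparse entries
// first, then the fine-grain PRT bitmaps, then the coarse map.  The
// _n_yielded_* counters record how many cards each level produced.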

bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was that the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region.  This may leave _coarse_cur_region_index
// ...
    }
    if (_fine_cur_prt == NULL) {
      fine_find_next_non_null_prt();
      if (_fine_cur_prt == NULL) return false;
    }
    assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0,
           "inv.");
    HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
    _cur_region_card_offset = _bosa->index_for(r_bot);
    _cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0);
  }
  assert(fine_has_next(), "Or else we exited the loop via the return.");
  card_index = _cur_region_card_offset + _cur_region_cur_card;
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next() {
  return
    _fine_cur_prt != NULL &&
    _cur_region_cur_card < HeapRegion::CardsPerRegion;
}

bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse:
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {