225 // It might be useful to also make the collision list doubly linked
226 // to avoid iteration over the collisions list during scrubbing/deletion.
227 // OTOH there might not be many collisions.
228
  // Next table in this hash bucket's collision chain (singly linked;
  // see the comment above about possibly making it doubly linked).
  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }
232
  // Links this table to the given successor in the collision chain.
  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }
236
  // Address of the chain link itself — lets callers splice this table
  // out of the collision chain without re-walking it.
  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }
240
241 static size_t fl_mem_size() {
242 PerRegionTable* cur = _free_list;
243 size_t res = 0;
244 while (cur != NULL) {
245 res += sizeof(PerRegionTable);
246 cur = cur->next();
247 }
248 return res;
249 }
250 };
251
252 PerRegionTable* PerRegionTable::_free_list = NULL;
253
// Tuning parameters for the fine-grain table; zero here is a placeholder —
// presumably set to their real values during remembered-set initialization
// (the setup code is not in this chunk — confirm against the caller).
size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
258
259 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
260 _g1h(G1CollectedHeap::heap()),
261 _m(Mutex::leaf, "An OtherRegionsTable lock", true),
262 _hr(hr),
263 _coarse_map(G1CollectedHeap::heap()->max_regions(),
264 false /* in-resource-area */),
265 _fine_grain_regions(NULL),
266 _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
267 _n_fine_entries(0), _n_coarse_entries(0),
268 _fine_eviction_start(0),
269 _sparse_table(hr)
1130 " for ref " PTR_FORMAT ".\n",
1131 _recorded_cards[i], _recorded_regions[i]->bottom(),
1132 _recorded_oops[i]);
1133 }
1134 }
1135
// Resets global sparse-table state ahead of the cleanup tasks; pure
// delegation to SparsePRT.
void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}
1139
// Forwards the per-region cleanup request to the underlying
// OtherRegionsTable, which owns the fine/coarse/sparse structures.
void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}
1143
// Completes the cleanup started via do_cleanup_work(); delegates to
// SparsePRT, which holds the global side of the cleanup state.
void
HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}
1148
1149 #ifndef PRODUCT
1150 void HeapRegionRemSet::test() {
1151 os::sleep(Thread::current(), (jlong)5000, false);
1152 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1153
1154 // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
1155 // hash bucket.
1156 HeapRegion* hr0 = g1h->region_at(0);
1157 HeapRegion* hr1 = g1h->region_at(1);
1158 HeapRegion* hr2 = g1h->region_at(5);
1159 HeapRegion* hr3 = g1h->region_at(6);
1160 HeapRegion* hr4 = g1h->region_at(7);
1161 HeapRegion* hr5 = g1h->region_at(8);
1162
1163 HeapWord* hr1_start = hr1->bottom();
1164 HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
1165 HeapWord* hr1_last = hr1->end() - 1;
1166
1167 HeapWord* hr2_start = hr2->bottom();
1168 HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
1169 HeapWord* hr2_last = hr2->end() - 1;
|
225 // It might be useful to also make the collision list doubly linked
226 // to avoid iteration over the collisions list during scrubbing/deletion.
227 // OTOH there might not be many collisions.
228
  // Next table in this hash bucket's collision chain (singly linked;
  // see the comment above about possibly making it doubly linked).
  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }
232
  // Links this table to the given successor in the collision chain.
  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }
236
  // Address of the chain link itself — lets callers splice this table
  // out of the collision chain without re-walking it.
  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }
240
241 static size_t fl_mem_size() {
242 PerRegionTable* cur = _free_list;
243 size_t res = 0;
244 while (cur != NULL) {
245 res += cur->mem_size();
246 cur = cur->next();
247 }
248 return res;
249 }
250
251 static void test_fl_mem_size();
252 };
253
254 PerRegionTable* PerRegionTable::_free_list = NULL;
255
// Tuning parameters for the fine-grain table; zero here is a placeholder —
// presumably set to their real values during remembered-set initialization
// (the setup code is not in this chunk — confirm against the caller).
size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
260
261 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
262 _g1h(G1CollectedHeap::heap()),
263 _m(Mutex::leaf, "An OtherRegionsTable lock", true),
264 _hr(hr),
265 _coarse_map(G1CollectedHeap::heap()->max_regions(),
266 false /* in-resource-area */),
267 _fine_grain_regions(NULL),
268 _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
269 _n_fine_entries(0), _n_coarse_entries(0),
270 _fine_eviction_start(0),
271 _sparse_table(hr)
1132 " for ref " PTR_FORMAT ".\n",
1133 _recorded_cards[i], _recorded_regions[i]->bottom(),
1134 _recorded_oops[i]);
1135 }
1136 }
1137
// Resets global sparse-table state ahead of the cleanup tasks; pure
// delegation to SparsePRT.
void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}
1141
// Forwards the per-region cleanup request to the underlying
// OtherRegionsTable, which owns the fine/coarse/sparse structures.
void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}
1145
// Completes the cleanup started via do_cleanup_work(); delegates to
// SparsePRT, which holds the global side of the cleanup state.
void
HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}
1150
1151 #ifndef PRODUCT
// Unit test (non-PRODUCT only): with exactly one element on the free
// list, fl_mem_size() must equal that element's own mem_size().
void PerRegionTable::test_fl_mem_size() {
  PerRegionTable* dummy = alloc(NULL);
  // free() evidently parks the table on _free_list rather than releasing
  // its storage — fl_mem_size() below must see it, and dummy remains
  // safe to dereference. (NOTE(review): confirm against free()'s impl.)
  free(dummy);
  guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
  // try to reset the state
  _free_list = NULL;
  // dummy was unlinked above, so this releases the storage alloc()
  // obtained and leaves no dangling free-list entry.
  delete dummy;
}
1160
// Entry point for PerRegionTable unit tests (non-PRODUCT builds only).
void HeapRegionRemSet::test_prt() {
  PerRegionTable::test_fl_mem_size();
}
1164
1165 void HeapRegionRemSet::test() {
1166 os::sleep(Thread::current(), (jlong)5000, false);
1167 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1168
1169 // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
1170 // hash bucket.
1171 HeapRegion* hr0 = g1h->region_at(0);
1172 HeapRegion* hr1 = g1h->region_at(1);
1173 HeapRegion* hr2 = g1h->region_at(5);
1174 HeapRegion* hr3 = g1h->region_at(6);
1175 HeapRegion* hr4 = g1h->region_at(7);
1176 HeapRegion* hr5 = g1h->region_at(8);
1177
1178 HeapWord* hr1_start = hr1->bottom();
1179 HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
1180 HeapWord* hr1_last = hr1->end() - 1;
1181
1182 HeapWord* hr2_start = hr2->bottom();
1183 HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
1184 HeapWord* hr2_last = hr2->end() - 1;
|