src/hotspot/share/gc/g1/g1CollectionSet.cpp

--- old/src/hotspot/share/gc/g1/g1CollectionSet.cpp
 188 
 189 void G1CollectionSet::clear() {
 190   assert_at_safepoint_on_vm_thread();
 191   _collection_set_cur_length = 0;
 192 }
 193 
 194 void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
 195   size_t len = _collection_set_cur_length;
 196   OrderAccess::loadload();
 197 
 198   for (uint i = 0; i < len; i++) {
 199     HeapRegion* r = _g1h->region_at(_collection_set_regions[i]);
 200     bool result = cl->do_heap_region(r);
 201     if (result) {
 202       cl->set_incomplete();
 203       return;
 204     }
 205   }
 206 }
 207 
 208 void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
 209   assert_at_safepoint();
 210 
 211   for (uint i = 0; i < _num_optional_regions; i++) {
 212     HeapRegion* r = _candidates->at(i);
 213     bool result = cl->do_heap_region(r);
 214     guarantee(!result, "Must not cancel iteration");
 215   }
 216 }
 217 
 218 void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl,
 219                                                     HeapRegionClaimer* hr_claimer,
 220                                                     uint worker_id,
 221                                                     uint total_workers) const {
 222   assert_at_safepoint();
 223 
 224   size_t len = increment_length();
 225   if (len == 0) {
 226     return;
 227   }
 228 
 229   size_t start_pos = (worker_id * len) / total_workers;
 230   size_t cur_pos = start_pos;
 231 
 232   do {
 233     uint region_idx = _collection_set_regions[cur_pos + _inc_part_start];
 234     if (hr_claimer == NULL || hr_claimer->claim_region(region_idx)) {
 235       HeapRegion* r = _g1h->region_at(region_idx);
 236       bool result = cl->do_heap_region(r);
 237       guarantee(!result, "Must not cancel iteration");
 238     }
 239 
 240     cur_pos++;
 241     if (cur_pos == len) {
 242       cur_pos = 0;
 243     }
 244   } while (cur_pos != start_pos);
 245 }
 246 
 247 void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
 248                                                      size_t new_rs_length) {
 249   // Update the CSet information that is dependent on the new RS length
 250   assert(hr->is_young(), "Precondition");
 251   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");
 252 
 253   // We could have updated _inc_recorded_rs_length and
 254   // _inc_predicted_elapsed_time_ms directly but we'd need to do
 255   // that atomically, as this code is executed by a concurrent
 256   // refinement thread, potentially concurrently with a mutator thread
 257   // allocating a new region and also updating the same fields. To
 258   // avoid the atomic operations we accumulate these updates on two
 259   // separate fields (*_diff) and we'll just add them to the "main"
 260   // fields at the start of a GC.
 261 

+++ new/src/hotspot/share/gc/g1/g1CollectionSet.cpp
 188 
 189 void G1CollectionSet::clear() {
 190   assert_at_safepoint_on_vm_thread();
 191   _collection_set_cur_length = 0;
 192 }
 193 
 194 void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
 195   size_t len = _collection_set_cur_length;
 196   OrderAccess::loadload();
 197 
 198   for (uint i = 0; i < len; i++) {
 199     HeapRegion* r = _g1h->region_at(_collection_set_regions[i]);
 200     bool result = cl->do_heap_region(r);
 201     if (result) {
 202       cl->set_incomplete();
 203       return;
 204     }
 205   }
 206 }
 207 
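Here iterate() takes a lock-free snapshot of _collection_set_cur_length; the
OrderAccess::loadload() keeps the subsequent reads of _collection_set_regions
from floating above the length read. A minimal sketch of the writer side this
pairs with (not shown in this hunk; hr->hrm_index() and the exact publish
sequence are assumptions based on the usual pattern):

    // Writer: publish the region index before bumping the length, so a
    // concurrent reader that sees the new length also sees the stored index.
    _collection_set_regions[_collection_set_cur_length] = hr->hrm_index();
    OrderAccess::storestore();
    _collection_set_cur_length++;
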
 208 void G1CollectionSet::par_iterate(HeapRegionClosure* cl,
 209                                   HeapRegionClaimer* hr_claimer,
 210                                   uint worker_id,
 211                                   uint total_workers) const {
 212   iterate_part_from(cl, hr_claimer, 0, cur_length(), worker_id, total_workers);
 213 }
 214 
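A hypothetical caller, to show how par_iterate() divides the collection set
among GC worker threads (the closure and task wiring below are illustrative
only, not part of this patch):

    class ExampleRegionClosure : public HeapRegionClosure {
    public:
      bool do_heap_region(HeapRegion* r) {
        // ... process r ...
        return false; // returning true would trip the guarantee below
      }
    };

    // Invoked from each GC worker; the shared HeapRegionClaimer ensures
    // every region is processed by exactly one worker.
    void example_work(G1CollectionSet* cset, HeapRegionClaimer* claimer,
                      uint worker_id, uint num_workers) {
      ExampleRegionClosure cl;
      cset->par_iterate(&cl, claimer, worker_id, num_workers);
    }
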
 215 void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
 216   assert_at_safepoint();
 217 
 218   for (uint i = 0; i < _num_optional_regions; i++) {
 219     HeapRegion* r = _candidates->at(i);
 220     bool result = cl->do_heap_region(r);
 221     guarantee(!result, "Must not cancel iteration");
 222   }
 223 }
 224 
 225 void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl,
 226                                                     HeapRegionClaimer* hr_claimer,
 227                                                     uint worker_id,
 228                                                     uint total_workers) const {
 229   iterate_part_from(cl, hr_claimer, _inc_part_start, increment_length(), worker_id, total_workers);
 230 }
 231 
 232 void G1CollectionSet::iterate_part_from(HeapRegionClosure* cl,
 233                                         HeapRegionClaimer* hr_claimer,
 234                                         size_t offset,
 235                                         size_t length,
 236                                         uint worker_id,
 237                                         uint total_workers) const {
 238   assert_at_safepoint();
 239   if (length == 0) {
 240     return;
 241   }
 242 
 243   size_t start_pos = (worker_id * length) / total_workers;
 244   size_t cur_pos = start_pos;
 245 
 246   do {
 247     uint region_idx = _collection_set_regions[cur_pos + offset];
 248     if (hr_claimer == NULL || hr_claimer->claim_region(region_idx)) {
 249       HeapRegion* r = _g1h->region_at(region_idx);
 250       bool result = cl->do_heap_region(r);
 251       guarantee(!result, "Must not cancel iteration");
 252     }
 253 
 254     cur_pos++;
 255     if (cur_pos == length) {
 256       cur_pos = 0;
 257     }
 258   } while (cur_pos != start_pos);
 259 }
 260 
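The start-position math spreads the workers evenly over the range, and the
circular walk lets a fast worker pick up regions its peers have not claimed
yet. A small worked example (values chosen purely for illustration):

    // For length == 10 and total_workers == 4, the workers start at
    // offsets 0, 2, 5 and 7; each then walks all 10 slots circularly,
    // and the claimer ensures each region is processed exactly once.
    for (uint worker_id = 0; worker_id < 4; worker_id++) {
      size_t start_pos = (worker_id * 10) / 4; // 0, 2, 5, 7
    }
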
 261 void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
 262                                                      size_t new_rs_length) {
 263   // Update the CSet information that is dependent on the new RS length
 264   assert(hr->is_young(), "Precondition");
 265   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");
 266 
 267   // We could have updated _inc_recorded_rs_length and
 268   // _inc_predicted_elapsed_time_ms directly but we'd need to do
 269   // that atomically, as this code is executed by a concurrent
 270   // refinement thread, potentially concurrently with a mutator thread
 271   // allocating a new region and also updating the same fields. To
 272   // avoid the atomic operations we accumulate these updates on two
 273   // separate fields (*_diff) and we'll just add them to the "main"
 274   // fields at the start of a GC.
 275 
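A minimal sketch of the scheme this comment describes, assuming the *_diff
fields are _inc_recorded_rs_length_diff and _inc_predicted_elapsed_time_ms_diff
(names inferred from the comment; rs_length_diff and elapsed_ms_diff are
hypothetical locals, and the hunk ends before the code that uses them):

    // Concurrent refinement thread: only touch the diff fields, so no
    // atomics are needed against mutators updating the main fields.
    _inc_recorded_rs_length_diff += rs_length_diff;
    _inc_predicted_elapsed_time_ms_diff += elapsed_ms_diff;

    // At the start of a GC (safepoint, single writer): fold the diffs
    // into the main fields and reset them.
    _inc_recorded_rs_length += _inc_recorded_rs_length_diff;
    _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diff;
    _inc_recorded_rs_length_diff = 0;
    _inc_predicted_elapsed_time_ms_diff = 0.0;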