                                (volatile jint*) &_first_par_unreserved_idx);
  assert(regions_length() > res + chunk_size - 1,
         "Should already have been expanded");
  return res - chunk_size;
}

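// Records hr as the candidate at the given (still unclaimed) slot and
// caches its GC efficiency, which is used later to order the candidates.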
void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
  assert(regions_at(index) == NULL, "precondition");
  assert(hr->is_old(), "should be old but is %s", hr->get_type_str());
  regions_at_put(index, hr);
  hr->calc_gc_efficiency();
}

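// Folds a worker's count of added regions and their reclaimable bytes
// into the shared totals.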
void CollectionSetChooser::update_totals(uint region_num,
                                         size_t reclaimable_bytes) {
  // Only take the lock if we actually need to update the totals.
  if (region_num > 0) {
    assert(reclaimable_bytes > 0, "invariant");
    // We could have just used atomics instead of taking the
    // lock. However, we currently don't have an atomic add for size_t.
    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    _end += region_num;
    _remaining_reclaimable_bytes += reclaimable_bytes;
  } else {
    assert(reclaimable_bytes == 0, "invariant");
  }
}

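// Applies cl to each remaining candidate region, stopping early and
// marking the closure incomplete if do_heap_region() returns true.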
void CollectionSetChooser::iterate(HeapRegionClosure* cl) {
  for (uint i = _front; i < _end; i++) {
    HeapRegion* r = regions_at(i);
    if (cl->do_heap_region(r)) {
      cl->set_incomplete();
      break;
    }
  }
}

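// Resets the chooser to an empty state so it can be rebuilt.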
void CollectionSetChooser::clear() {
  _regions.clear();
  _front = 0;