< prev index next >

src/hotspot/share/gc/g1/heapRegionRemSet.cpp

Print this page

  1 /*
  2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *

 53     } else {
 54       fl = _free_list;
 55     }
 56   }
 57   assert(fl == NULL, "Loop condition.");
 58   return new PerRegionTable(hr);
 59 }
 60 
 61 PerRegionTable* volatile PerRegionTable::_free_list = NULL; // shared free list of recycled PerRegionTables
 62 
 63 size_t OtherRegionsTable::_max_fine_entries = 0;            // capacity of the fine-grain table; a power of two (see ctor)
 64 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;   // _max_fine_entries - 1, used to mask region indices
 65 size_t OtherRegionsTable::_fine_eviction_stride = 0;        // _max_fine_entries / _fine_eviction_sample_size (see ctor)
 66 size_t OtherRegionsTable::_fine_eviction_sample_size = 0;   // MAX2(4, log2(G1RSetRegionEntries)) (see ctor)
 67 
 68 OtherRegionsTable::OtherRegionsTable(Mutex* m) :
 69   _g1h(G1CollectedHeap::heap()),
 70   _m(m),
 71   _num_occupied(0),
 72   _coarse_map(mtGC),
 73   _n_coarse_entries(0),
 74   _fine_grain_regions(NULL),
 75   _n_fine_entries(0),
 76   _first_all_fine_prts(NULL),
 77   _last_all_fine_prts(NULL),
 78   _fine_eviction_start(0),
 79   _sparse_table()
 80 {
 81   typedef PerRegionTable* PerRegionTablePtr;
 82 
 83   if (_max_fine_entries == 0) {
 84     assert(_mod_max_fine_entries_mask == 0, "Both or none.");
 85     size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
 86     _max_fine_entries = (size_t)1 << max_entries_log;
 87     _mod_max_fine_entries_mask = _max_fine_entries - 1;
 88 
 89     assert(_fine_eviction_sample_size == 0
 90            && _fine_eviction_stride == 0, "All init at same time.");
 91     _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
 92     _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
 93   }

244         max = cur;
245         max_prev = prev;
246         max_occ = cur_occ;
247       }
248       prev = cur->collision_list_next_addr();
249       cur = cur->collision_list_next();
250     }
251     i = i + _fine_eviction_stride;
252     if (i >= _n_fine_entries) i = i - _n_fine_entries;
253   }
254 
255   _fine_eviction_start++;
256 
257   if (_fine_eviction_start >= _n_fine_entries) {
258     _fine_eviction_start -= _n_fine_entries;
259   }
260 
261   guarantee(max != NULL, "Since _n_fine_entries > 0");
262   guarantee(max_prev != NULL, "Since max != NULL.");
263 
264   // Set the corresponding coarse bit.
265   size_t max_hrm_index = (size_t) max->hr()->hrm_index();
266   if (_n_coarse_entries == 0) {


267     // This will lazily initialize an uninitialized bitmap
268     _coarse_map.reinitialize(G1CollectedHeap::heap()->max_regions());

269     _coarse_map.at_put(max_hrm_index, true);
270     // Release store guarantees that the bitmap has initialized before any
271     // concurrent reader will ever see a non-zero value for _n_coarse_entries
272     // (when read with load_acquire)
273     Atomic::release_store(&_n_coarse_entries, _n_coarse_entries + 1);
274   } else if (!_coarse_map.at(max_hrm_index)) {
275     _coarse_map.at_put(max_hrm_index, true);
276     _n_coarse_entries++;
277   }
278 
279   added_by_deleted = HeapRegion::CardsPerRegion - max_occ;
280   // Unsplice.
281   *max_prev = max->collision_list_next();
282   Atomic::inc(&_n_coarsenings);
283   _n_fine_entries--;
284   return max;
285 }
286 
287 bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
288   return occupied() <= limit;
289 }
290 
291 bool OtherRegionsTable::is_empty() const {
292   return occupied() == 0;
293 }
294 
295 size_t OtherRegionsTable::occupied() const {
296   return _num_occupied;

314 size_t OtherRegionsTable::static_mem_size() {
315   return G1FromCardCache::static_mem_size();
316 }
317 
318 size_t OtherRegionsTable::fl_mem_size() {
319   return PerRegionTable::fl_mem_size();
320 }
321 
322 void OtherRegionsTable::clear() {
323   // if there are no entries, skip this step
324   if (_first_all_fine_prts != NULL) {
325     guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
326     PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
327     memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
328   } else {
329     guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
330   }
331 
332   _first_all_fine_prts = _last_all_fine_prts = NULL;
333   _sparse_table.clear();
334   if (_n_coarse_entries > 0) {
335     _coarse_map.clear();
336   }
337   _n_fine_entries = 0;
338   _n_coarse_entries = 0;
339 
340   _num_occupied = 0;
341 }
342 
343 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
344   // Cast away const in this case.
345   MutexLocker x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
346   return contains_reference_locked(from);
347 }
348 
349 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
350   HeapRegion* hr = _g1h->heap_region_containing(from);
351   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
352   // Is this region in the coarse map?
353   if (is_region_coarsened(hr_ind)) return true;
354 
355   PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
356                                           hr);
357   if (prt != NULL) {
358     return prt->contains_reference(from);
359   } else {
360     CardIdx_t card_index = card_within_region(from, hr);
361     return _sparse_table.contains_card(hr_ind, card_index);
362   }
363 }
364 
365 // A load_acquire on _n_coarse_entries - coupled with the release_store in
366 // delete_region_table - guarantees we don't access _coarse_map before
367 // it's been properly initialized.
368 bool OtherRegionsTable::is_region_coarsened(RegionIdx_t from_hrm_ind) const {
369   return Atomic::load_acquire(&_n_coarse_entries) > 0 && _coarse_map.at(from_hrm_ind);
370 }
371 
372 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
373                                    HeapRegion* hr)
374   : _bot(bot),
375     _code_roots(),
376     _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Mutex::_safepoint_check_never),
377     _other_regions(&_m),
378     _hr(hr),
379     _state(Untracked)
380 {
381 }
382 
383 void HeapRegionRemSet::clear_fcc() {
384   G1FromCardCache::clear(_hr->hrm_index());
385 }
386 
387 void HeapRegionRemSet::setup_remset_size() {
388   const int LOG_M = 20;
389   guarantee(HeapRegion::LogOfHRGrainBytes >= LOG_M, "Code assumes the region size >= 1M, but is " SIZE_FORMAT "B", HeapRegion::GrainBytes);

  1 /*
  2  * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *

 53     } else {
 54       fl = _free_list;
 55     }
 56   }
 57   assert(fl == NULL, "Loop condition.");
 58   return new PerRegionTable(hr);
 59 }
 60 
 61 PerRegionTable* volatile PerRegionTable::_free_list = NULL; // shared free list of recycled PerRegionTables
 62 
 63 size_t OtherRegionsTable::_max_fine_entries = 0;            // capacity of the fine-grain table; a power of two (see ctor)
 64 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;   // _max_fine_entries - 1, used to mask region indices
 65 size_t OtherRegionsTable::_fine_eviction_stride = 0;        // _max_fine_entries / _fine_eviction_sample_size (see ctor)
 66 size_t OtherRegionsTable::_fine_eviction_sample_size = 0;   // MAX2(4, log2(G1RSetRegionEntries)) (see ctor)
 67 
 68 OtherRegionsTable::OtherRegionsTable(Mutex* m) :
 69   _g1h(G1CollectedHeap::heap()),
 70   _m(m),
 71   _num_occupied(0),
 72   _coarse_map(mtGC),
 73   _has_coarse_entries(false),
 74   _fine_grain_regions(NULL),
 75   _n_fine_entries(0),
 76   _first_all_fine_prts(NULL),
 77   _last_all_fine_prts(NULL),
 78   _fine_eviction_start(0),
 79   _sparse_table()
 80 {
 81   typedef PerRegionTable* PerRegionTablePtr;
 82 
 83   if (_max_fine_entries == 0) {
 84     assert(_mod_max_fine_entries_mask == 0, "Both or none.");
 85     size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
 86     _max_fine_entries = (size_t)1 << max_entries_log;
 87     _mod_max_fine_entries_mask = _max_fine_entries - 1;
 88 
 89     assert(_fine_eviction_sample_size == 0
 90            && _fine_eviction_stride == 0, "All init at same time.");
 91     _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
 92     _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
 93   }

244         max = cur;
245         max_prev = prev;
246         max_occ = cur_occ;
247       }
248       prev = cur->collision_list_next_addr();
249       cur = cur->collision_list_next();
250     }
251     i = i + _fine_eviction_stride;
252     if (i >= _n_fine_entries) i = i - _n_fine_entries;
253   }
254 
255   _fine_eviction_start++;
256 
257   if (_fine_eviction_start >= _n_fine_entries) {
258     _fine_eviction_start -= _n_fine_entries;
259   }
260 
261   guarantee(max != NULL, "Since _n_fine_entries > 0");
262   guarantee(max_prev != NULL, "Since max != NULL.");
263 
264   // Ensure the corresponding coarse bit is set.
265   size_t max_hrm_index = (size_t) max->hr()->hrm_index();
266   if (Atomic::load(&_has_coarse_entries)) {
267     _coarse_map.at_put(max_hrm_index, true);
268   } else {
269     // This will lazily initialize an uninitialized bitmap
270     _coarse_map.reinitialize(G1CollectedHeap::heap()->max_regions());
271     assert(!_coarse_map.at(max_hrm_index), "No coarse entries");
272     _coarse_map.at_put(max_hrm_index, true);
273     // Release store guarantees that the bitmap has initialized before any
274     // concurrent reader will ever see _has_coarse_entries is true
275     // (when read with load_acquire)
276     Atomic::release_store(&_has_coarse_entries, true);



277   }
278 
279   added_by_deleted = HeapRegion::CardsPerRegion - max_occ;
280   // Unsplice.
281   *max_prev = max->collision_list_next();
282   Atomic::inc(&_n_coarsenings);
283   _n_fine_entries--;
284   return max;
285 }
286 
287 bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
288   return occupied() <= limit;
289 }
290 
291 bool OtherRegionsTable::is_empty() const {
292   return occupied() == 0;
293 }
294 
295 size_t OtherRegionsTable::occupied() const {
296   return _num_occupied;

314 size_t OtherRegionsTable::static_mem_size() {
315   return G1FromCardCache::static_mem_size();
316 }
317 
318 size_t OtherRegionsTable::fl_mem_size() {
319   return PerRegionTable::fl_mem_size();
320 }
321 
322 void OtherRegionsTable::clear() {
323   // if there are no entries, skip this step
324   if (_first_all_fine_prts != NULL) {
325     guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
326     PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
327     memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
328   } else {
329     guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
330   }
331 
332   _first_all_fine_prts = _last_all_fine_prts = NULL;
333   _sparse_table.clear();
334   if (Atomic::load(&_has_coarse_entries)) {
335     _coarse_map.clear();
336   }
337   _n_fine_entries = 0;
338   Atomic::store(&_has_coarse_entries, false);
339 
340   _num_occupied = 0;
341 }
342 
343 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
344   // Cast away const in this case.
345   MutexLocker x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
346   return contains_reference_locked(from);
347 }
348 
349 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
350   HeapRegion* hr = _g1h->heap_region_containing(from);
351   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
352   // Is this region in the coarse map?
353   if (is_region_coarsened(hr_ind)) return true;
354 
355   PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
356                                           hr);
357   if (prt != NULL) {
358     return prt->contains_reference(from);
359   } else {
360     CardIdx_t card_index = card_within_region(from, hr);
361     return _sparse_table.contains_card(hr_ind, card_index);
362   }
363 }
364 
365 // A load_acquire on _has_coarse_entries - coupled with the release_store in
366 // delete_region_table - guarantees we don't access _coarse_map before
367 // it's been properly initialized.
368 bool OtherRegionsTable::is_region_coarsened(RegionIdx_t from_hrm_ind) const {
369   return Atomic::load_acquire(&_has_coarse_entries) && _coarse_map.at(from_hrm_ind);
370 }
371 
372 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
373                                    HeapRegion* hr)
374   : _bot(bot),
375     _code_roots(),
376     _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Mutex::_safepoint_check_never),
377     _other_regions(&_m),
378     _hr(hr),
379     _state(Untracked)
380 {
381 }
382 
383 void HeapRegionRemSet::clear_fcc() {
384   G1FromCardCache::clear(_hr->hrm_index());
385 }
386 
387 void HeapRegionRemSet::setup_remset_size() {
388   const int LOG_M = 20;
389   guarantee(HeapRegion::LogOfHRGrainBytes >= LOG_M, "Code assumes the region size >= 1M, but is " SIZE_FORMAT "B", HeapRegion::GrainBytes);
< prev index next >