        _occupied++;
      }
    }
  }

  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation. This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    if (loc_hr->is_in_reserved(from)) {
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card = (CardIdx_t)
          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
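      // A worked instance (assuming the usual 512-byte cards and 8-byte
      // heap words): card_shift == 9 and LogHeapWordSize == 3, so the word
      // offset is shifted right by 6, i.e. one card covers 64 heap words;
      // hw_offset == 200, for example, yields from_card == 3.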

      assert((size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }

public:

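  // hr() reads _hr with an acquire load; this presumably pairs with a
  // release store of _hr when init() publishes the region, so a reader
  // that sees the new _hr also sees the table state written before it.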
  HeapRegion* hr() const {
    return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
  }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    // ... (body elided)

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size).
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    CardTableModRefBS::card_size);
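    // pointer_delta(p, q, esize) is the byte offset (p - q) divided by
    // esize; with 512-byte cards, a "from" 1536 bytes above bottom() maps
    // to card index 3.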
    return _bm.at(card_ind);
  }

  // Bulk-free the PRTs from prt to last; assumes that they are
  // linked together through their _next fields.
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
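      // cmpxchg_ptr publishes prt as the new list head only if the head is
      // still fl; if another thread changed _free_list in the meantime,
      // res != fl and we retry against the fresh head.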
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

// ... (elided; what follows appears to be the tail of
// OtherRegionsTable::unlink_from_all)

  if (prt->next() != NULL) {
    prt->next()->set_prev(prt->prev());
  }

  prt->set_next(NULL);
  prt->set_prev(NULL);

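  // Sanity: the head and tail of the 'all' list must be NULL together or
  // non-NULL together, and the ends of the list have no stale neighbors.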
  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

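// The insertion path below tries progressively coarser representations:
// the per-thread G1FromCardCache first, then the coarse region bitmap,
// then the sparse per-region card table, and finally a fine-grained
// PerRegionTable (coarsening an existing one when the fine table is full).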
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
  uint cur_hrm_ind = _hr->hrm_index();

  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
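  // Note: from_card is a global card index (address >> card_shift); the
  // region-local index is derived from it below when needed.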

  if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
    assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
    return;
  }

  // Note that this may be a continued humongous region.
  HeapRegion* from_hr = _g1h->heap_region_containing(from);
  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrm_ind)) {
    assert(contains_reference(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from));
    return;
  }

  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
  PerRegionTable* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom())
          >> CardTableModRefBS::card_shift;
      CardIdx_t card_index = from_card - from_hr_bot_card_index;
      assert((size_t)card_index < HeapRegion::CardsPerRegion,
             "Must be in range.");
      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrm_ind, card_index)) {
        assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from));
        return;
      }

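      // Reaching here means the sparse entry for this region overflowed,
      // so its cards are migrated to a fine-grained table below.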
      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      // ... (remainder of add_reference and other definitions elided)

bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}

bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing(from);
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                          hr);
  if (prt != NULL) {
    return prt->contains_reference(from);
  } else {
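    // Not in a fine-grained table: compute the region-local card index and
    // consult the sparse table.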
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert((size_t)card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}

void OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
                                   HeapRegion* hr)
  : _bot(bot),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
    _code_roots(),
    _other_regions(hr, &_m) {
}
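
// A minimal, self-contained sketch of the region-local card-index
// arithmetic used throughout this file (illustrative only; assumes the
// usual 512-byte cards and 8-byte heap words on a 64-bit VM, and that
// "from" lies at or above "region_bottom").
static size_t example_region_local_card_index(const void* from,
                                              const void* region_bottom) {
  const size_t card_shift = 9;  // log2(512-byte card)
  size_t byte_offset = (const char*)from - (const char*)region_bottom;
  return byte_offset >> card_shift;  // one card covers 64 heap words
}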