    // If the test below fails, then this table was reused concurrently
    // with this operation. This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    if (loc_hr->is_in_reserved(from)) {
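      // The word offset below is converted to a card index by shifting out
      // log2(card size in words): card_shift is log2 of the card size in
      // bytes, so subtracting LogHeapWordSize gives the shift in words.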
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card =
          (CardIdx_t)(hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize));

      assert((size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }

public:

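  // Returns the HeapRegion this PRT currently covers. The load_acquire
  // pairs with the release_store in init() below, so a thread that sees
  // the newly published _hr value also sees the cleared bitmap.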
  HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
    // Make sure that the bitmap clearing above has been finished before publishing
    // this PRT to concurrent threads.
    OrderAccess::release_store(&_hr, hr);
  }

  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  void scrub(G1CardLiveData* live_data) {
    live_data->remove_nonlive_cards(hr()->hrm_index(), &_bm);
    recount_occupied();
  }

  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
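  // pointer_delta scaled by card_size yields the index of the card spanned
  // by "from" within this region, i.e. effectively the same bit position
  // in _bm that add_card_work sets when the card is recorded.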
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    CardTableModRefBS::card_size);
    return _bm.at(card_ind);
  }

  // Bulk-free the PRTs from prt to last, assuming they are linked
  // together via their _next fields.
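  // This is a lock-free LIFO push: the CAS installs prt as the new list
  // head only if the head is still fl; on failure another thread changed
  // the list concurrently, and the loop retries against the new head.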
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

  // Returns an initialized PerRegionTable instance.
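  // Pops the head of the lock-free free list if possible: on CAS failure
  // the head is re-read and the pop retried; once the list is observed
  // empty, a fresh table is heap-allocated instead.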
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* fl = _free_list;
    while (fl != NULL) {
      PerRegionTable* nxt = fl->next();
      PerRegionTable* res = Atomic::cmpxchg(nxt, &_free_list, fl);
      if (res == fl) {
        fl->init(hr, true);
        return fl;
      } else {
        fl = _free_list;
      }
    }
    assert(fl == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and Modification routines for the pointer for the
  // singly linked collision list that links the PRTs within the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      // The assignment into _fine_grain_regions allows the prt to
      // start being used concurrently. In addition to
      // collision_list_next which must be visible (else concurrent
      // parsing of the list, if any, may fail to see other entries),
      // the content of the prt must be visible (else for instance
      // some mark bits may not yet seem cleared or a 'later' update
      // performed by a concurrent thread could be undone when the
      // zeroing becomes visible). This requires store ordering.
      OrderAccess::release_store(&_fine_grain_regions[ind], prt);
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
        // Transfer from sparse to fine-grain.
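        // Any cards already recorded in the sparse entry must be copied
        // into the new fine-grain table before the sparse entry is
        // deleted, otherwise those remembered-set entries would be lost.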
        SparsePRTEntry* sprt_entry = _sparse_table.get_entry(from_hrm_ind);
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < sprt_entry->num_valid_cards(); i++) {
          CardIdx_t c = sprt_entry->card(i);
          prt->add_card(c);
        }
        // Now we can delete the sparse entry.
        bool res = _sparse_table.delete_entry(from_hrm_ind);
        assert(res, "It should have been there.");
      }
    }
      assert(prt != NULL && prt->hr() == from_hr, "consequence");
    }
    // Note that we can't assert "prt->hr() == from_hr", because of the
    // possibility of concurrent reuse. But see head comment of
    // OtherRegionsTable for why this is OK.