 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

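// Returns the size, in bytes, of the backing store for the card table byte
// map: the guard card plus all data cards, rounded up to the page size (or
// the allocation granularity, whichever is larger) so that the map can be
// reserved and committed page by page.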
size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL)
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

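// Reserves the backing store for the byte map, establishes byte_map_base so
// that heap addresses map to card bytes with a single shift-and-add, and
// commits the page holding the guard card.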
void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
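  //
  // A worked example, assuming for illustration a 512-byte card size
  // (card_shift == 9) and low_bound == 0x0000000700000000: the card byte
  // for a heap address p is byte_map_base + (uintptr_t(p) >> 9), so
  // p == low_bound maps to byte_map_base + 0x3800000, which is arranged
  // below to be exactly &_byte_map[0].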
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  log_trace(gc, barrier)("CardTableModRefBS::initialize: ");
  log_trace(gc, barrier)("  &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)("  byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}

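// Finds the index of the covered region that starts at "base", creating a
// new entry (and shifting later entries up) if none exists. The _covered and
// _committed arrays are kept sorted by start address.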
int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

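// Returns the largest end address among the committed regions with index
// below "ind", i.e. how far the lower regions' card table pages extend.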
HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

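// Returns the portion of "mr" that is committed on behalf of region "self"
// only: the parts shared with other regions' committed ranges and with the
// guard page are subtracted out, so the result is safe to uncommit.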
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
      // Use new_end_aligned (as opposed to new_end_for_commit) because
      // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving. A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region. This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region:
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}

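// Marks as dirty every card intersecting the (HeapWord-aligned) region "mr",
// including cards only partially covered at either end.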
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

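// Unlike invalidate(), dirty() does not intersect "mr" with the covered
// regions; the caller is responsible for passing a region that lies
// entirely within the covered heap.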
void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

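// Returns the first maximal range of dirty cards within "mr", as a MemRegion
// of heap addresses; if "reset" is true, the cards of that range are set to
// "reset_val" before returning. Returns an empty region at mr.end() if no
// dirty card is found.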
MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

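// The maximum heap alignment the card table can require: one fully
// committed page of card bytes maps card_size * vm_page_size() bytes of
// heap, so generation boundaries aligned to this value fall on card table
// page boundaries.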
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start = byte_for(mr.start());
  jbyte* end   = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
        failures = true;
      }
      log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
                            p2i(curr), p2i(addr_for(curr)),
                            p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                            (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}

// ===========================================================================
// cardTableModRefBS.cpp, refactored version: the card table bookkeeping has
// been moved into a separate CardTable object to which this barrier set
// delegates.
// ===========================================================================

 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/c1CardTableModRefBSCodeGen.hpp"
#include "gc/shared/c2CardTableModRefBSCodeGen.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/cardTableModRefBSCodeGen.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

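// Region and array write barriers simply dirty the corresponding cards in
// the underlying card table.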
void CardTableModRefBS::write_region(MemRegion mr) {
  _card_table->dirty_MemRegion(mr);
}

void CardTableModRefBS::write_ref_array_region(MemRegion mr) {
  _card_table->dirty_MemRegion(mr);
}

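// Factories for the barrier code generators: one for the shared/assembler
// path and one each for the C1 and C2 compilers.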
BarrierSetCodeGen* CardTableModRefBS::make_code_gen() {
  return new CardTableModRefBSCodeGen();
}

C1BarrierSetCodeGen* CardTableModRefBS::make_c1_code_gen() {
  return new C1CardTableModRefBSCodeGen();
}

C2BarrierSetCodeGen* CardTableModRefBS::make_c2_code_gen() {
  return new C2CardTableModRefBSCodeGen();
}

void CardTableModRefBS::initialize() {
  ModRefBarrierSet::initialize();
  // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
  // otherwise remains unused.
#if defined(COMPILER2) || INCLUDE_JVMCI
  _can_elide_tlab_store_barriers = can_elide_tlab_store_barriers();
  _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks &&
    _can_elide_tlab_store_barriers && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
  assert(_can_elide_tlab_store_barriers == false, "Who would set it?");
#endif
}

CardTableModRefBS::CardTableModRefBS(
  CardTable* card_table,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  _defer_initial_card_mark(false),
  _can_elide_tlab_store_barriers(false),
  _card_table(card_table)
{}

CardTableModRefBS::CardTableModRefBS(CardTable* card_table) :
  ModRefBarrierSet(BarrierSet::FakeRtti(BarrierSet::CardTableModRef)),
  _defer_initial_card_mark(false),
  _can_elide_tlab_store_barriers(false),
  _card_table(card_table)
{}

CardTableModRefBS::~CardTableModRefBS() {
  delete _card_table;
}

void CardTableModRefBS::invalidate(MemRegion mr) {
  _card_table->invalidate(mr);
}

void CardTableModRefBS::print_on(outputStream* st) const {
  _card_table->print_on(st);
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the MemRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
void CardTableModRefBS::new_deferred_store_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) {
    // Arrays of non-references don't need a post-barrier.
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      write_region(mr);
    }
  }
}

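// If the thread has a deferred card-mark region pending, card-marks it now
// and clears the thread-local record, so that the remembered set is complete
// before the region can be scanned.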
void CardTableModRefBS::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(!_card_table->is_in_young(old_obj),
             "Else should have been filtered in new_deferred_store_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

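// An exiting thread must not leave a deferred card mark behind.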
void CardTableModRefBS::on_destroy_thread(JavaThread* thread) {
  ModRefBarrierSet::on_destroy_thread(thread);
  flush_deferred_store_barrier(thread);
}

void CardTableModRefBS::make_parsable(JavaThread* thread) {
  ModRefBarrierSet::make_parsable(thread);
#if defined(COMPILER2) || INCLUDE_JVMCI
  // The deferred store barriers must all have been flushed to the
  // card-table (or other remembered set structure) before GC starts
  // processing the card-table (or other remembered set).
  if (_defer_initial_card_mark) flush_deferred_store_barrier(thread);
#else
  assert(!_defer_initial_card_mark, "Should be false");
  assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
}

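// A card mark must strictly follow the initializing stores when the card
// table may be scanned concurrently with mutator execution.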
bool CardTableModRefBS::card_mark_must_follow_store() const {
  return _card_table->scanned_concurrently();
}