#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/virtualspace.hpp"
#include "logging/log.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).
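//
// A sketch of the underlying scheme (summary, not code from this file): the
// heap is divided into power-of-two sized "cards" (card_size bytes, 512 in
// HotSpot), and _byte_map keeps one byte per card.  A store to a reference
// field marks the corresponding card byte dirty_card, so a later remembered-set
// scan only has to visit cards that are marked dirty.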

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}
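// Worked example (illustrative numbers, not taken from this file): with a
// 1 GB heap and 512-byte cards, the map needs about 2M card bytes plus one
// guard card, so _guard_index + 1 is roughly 2 MB; that size is then rounded
// up to a multiple of the larger of _page_size and the VM allocation
// granularity, e.g. 4 KB on a typical Linux/x86 build.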

CardTableModRefBS::CardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL)
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  // ... [elided in this excerpt: the tail of the constructor and the start of
  // CardTableModRefBS::initialize(), which reserves the byte map backing
  // store (heap_rs) and derives low_bound/high_bound from _whole_heap; the
  // code below is from the body of initialize()] ...

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
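  // With byte_map_base biased this way, byte_for(p) (declared in the class
  // header) reduces to
  //   &byte_map_base[uintptr_t(p) >> card_shift]
  // so the card byte for an address is reached with one shift and one add,
  // without subtracting low_bound first; that is the shift-and-add pattern the
  // store_check comment above refers to.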
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: ");
  log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)(" byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}
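// _covered[i] and _committed[i] are parallel entries for the same region: the
// heap range being covered and the card-table pages committed for it.  The
// search above keeps _covered sorted by start address, and the shift loop
// preserves that order when a new entry is spliced in.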

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

// ... [elided in this excerpt: committed_unique_to_self() and the start of
// resize_covered_region(); the lines that follow are from the body of
// resize_covered_region(MemRegion new_region)] ...

  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
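    // For example, if a lower region's committed card-table range was rounded
    // up to a page boundary that already covers the first cards of this
    // region, folding max_prev_end into cur_committed here means the commit
    // further below (elided in this excerpt) will not try to commit that
    // shared page a second time; the two committed MemRegions simply overlap
    // at the page boundary.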
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned = (HeapWord*) align_ptr_up(new_end, _page_size);
    assert((void*)new_end_aligned >= (void*) new_end, "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".

  // ... [elided in this excerpt: the collision handling, the actual commit or
  // uncommit of card-table pages, and the _covered/_committed bookkeeping;
  // the trace output below closes resize_covered_region()] ...
  log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}
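
// A sketch of what inline_write_ref_field() does (it is defined in
// cardTableModRefBS.inline.hpp): it looks up the card byte for the field's
// address and marks it dirty, roughly
//   volatile jbyte* byte = byte_for(field);
//   if (release) OrderAccess::release_store(byte, jbyte(dirty_card));
//   else         *byte = dirty_card;
// so the out-of-line "work" routine above simply forwards to that inline path.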


void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert(align_ptr_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_ptr_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr) {
  assert(align_ptr_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_ptr_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}
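
// Illustration of the conservatism above: if mr.start() falls in the middle
// of a card, byte_after(mr.start() - 1) is the byte for the *following* card,
// so a first card that is only partially covered by mr keeps whatever
// dirty/clean value it already had.  Only when mr starts at the very beginning
// of the heap is that first card cleaned outright, since no earlier object can
// extend into it.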