 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

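// Illustrative sketch only (not this file's scanning code): one way a
// collector can enumerate the modified fields is to walk the byte map and
// revisit the heap words covered by any card that is no longer clean.
// Names here are schematic; the real card scanning lives elsewhere in the GC.
//
//   for (size_t i = 0; i <= last_valid_index; i++) {
//     if (byte_map[i] != clean_card) {
//       // Re-scan the card_size bytes of heap covered by card i and
//       // process any reference fields found there.
//     }
//   }
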
size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

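// Worked example for the size computation above (illustrative figures,
// assuming 512-byte cards and a 4K page/allocation granularity): a 512 MB
// heap needs 2^20 card bytes plus one guard card, so _guard_index + 1 is
// 1048577; aligning that up to 4K yields a 1052672-byte (1 MB + 4 KB) map.
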
CardTableModRefBS::CardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  // ... (the remaining initializers and the body up to the
  //      failed-reservation check are elided here) ...
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: ");
  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
}

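// A minimal sketch (hypothetical helper, not part of this file) of the
// store-check arithmetic that the biased byte_map_base enables: because
// byte_map_base is _byte_map minus (low_bound >> card_shift), the barrier
// can index it directly with the raw shifted field address. Valid only for
// addresses inside the covered heap.
//
//   static inline void example_store_check(jbyte* byte_map_base, void* field) {
//     byte_map_base[uintptr_t(field) >> CardTableModRefBS::card_shift] =
//         CardTableModRefBS::dirty_card;
//   }
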
CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  // ... (the remainder of this function and the bulk of
  //      CardTableModRefBS::resize_covered_region are elided here) ...
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT "  _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

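// Worked example for the clean_card memset above (illustrative figures): if
// the newly committed card bytes span entry = 0x1000 to end = 0x1400, then
// pointer_delta(end, entry, sizeof(jbyte)) is 0x400, so 1024 card entries
// are reset; with 512-byte cards those entries cover 512 KB of heap.
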
// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}


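// Sketch of the precise/imprecise distinction noted above (schematic, not
// this file's API): a precise barrier dirties the card of the updated field
// itself, while an imprecise barrier may dirty only the card of the object's
// start, forcing the scan to walk the entire object.
//
//   // Precise: card containing the field.
//   byte_map_base[uintptr_t(field)     >> card_shift] = dirty_card;
//   // Imprecise: card containing the object header.
//   byte_map_base[uintptr_t(obj_start) >> card_shift] = dirty_card;
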
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}
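
// Usage sketch (hypothetical values): dirtying a word-aligned region marks
// every card byte the region touches, via byte_for()/byte_after().
//
//   MemRegion mr(start, start + 64);  // 64 HeapWords, word-aligned
//   bs->dirty_MemRegion(mr);          // all cards covering mr are now dirty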