}

CardTable::~CardTable() {
  MemRegion::destroy_array(_covered, _max_covered_regions);
  MemRegion::destroy_array(_committed, _max_covered_regions);
}

void CardTable::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;
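  // Every card in _whole_heap gets a slot; the very last card is reserved
  // as a guard card, so the last index usable for ordinary marking is the
  // one just before it.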

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
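  // If the card table can live on ordinary OS pages, the reservation needs
  // no extra alignment; otherwise it must be aligned to at least the chosen
  // page size and the platform's allocation granularity.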
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to _byte_map_base, i.e.
  //
  //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
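  //
  // Illustrative example (an assumption for this sketch: the default
  // 512-byte cards, i.e. card_shift == 9, and low_bound == 0x10000):
  // the card for an oop at 0x10200 is
  //   _byte_map_base + (0x10200 >> 9) == _byte_map_base + 0x81
  // which equals &_byte_map[(0x10200 - 0x10000) >> 9] == &_byte_map[1].
  // Note that _byte_map_base itself may point outside the reserved range
  // and must never be dereferenced on its own.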
  _byte_map = (CardValue*) heap_rs.base();
  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  CardValue* guard_card = &_byte_map[_guard_index];