// Size the card table: one card per cards_required(...) unit of the whole
// heap; the final slot is reserved as a "guard" card (see commit below),
// so the last index usable for covered heap is one less.
76 _guard_index = cards_required(_whole_heap.word_size()) - 1;
77 _last_valid_index = _guard_index - 1;
78
79 _byte_map_size = compute_byte_map_size();
80
// Address range of the entire heap this card table must cover.
81 HeapWord* low_bound = _whole_heap.start();
82 HeapWord* high_bound = _whole_heap.end();
83
// Bookkeeping for the regions of the byte map that have been committed;
// nothing is covered yet. NOTE(review): the NULL check suggests this `new`
// is a non-throwing HotSpot CHeap allocation — confirm against the class's
// operator new.
84 _cur_covered_regions = 0;
85 _committed = new MemRegion[_max_covered_regions];
86 if (_committed == NULL) {
87 vm_exit_during_initialization("Could not allocate card table committed region set.");
88 }
89
// Reserve (but do not commit) virtual address space for the byte map.
// If the card-table page size is just the OS page size, no extra alignment
// is needed (0); otherwise align the reservation to the larger of the page
// size and the allocation granularity.
90 const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
91 MAX2(_page_size, (size_t) os::vm_allocation_granularity());
92 ReservedSpace heap_rs(_byte_map_size, rs_align, false);
93
// Tag the reservation as GC memory for Native Memory Tracking.
94 MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
95
// NOTE(review): heap_rs.base()/size() are used for NMT and page-size
// tracing before the is_reserved() check below — presumably benign when
// the reservation failed (base == NULL); confirm.
96 os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
97 _page_size, heap_rs.base(), heap_rs.size());
98 if (!heap_rs.is_reserved()) {
99 vm_exit_during_initialization("Could not reserve enough space for the "
100 "card marking array")
101 }
102
103 // The assembler store_check code will do an unsigned shift of the oop,
104 // then add it to byte_map_base, i.e.
105 //
106 // _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
107 _byte_map = (jbyte*) heap_rs.base();
108 byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
// Sanity: the biased base must map low_bound to slot 0 and keep the last
// heap word within the last valid (non-guard) slot.
109 assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
110 assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
111
// Commit only the page containing the guard card now (the rest of the map
// is committed lazily elsewhere), so stores to the guard card never fault.
112 jbyte* guard_card = &_byte_map[_guard_index];
113 uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
114 _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
115 os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
116 !ExecMem, "card table last card");
|
// Size the card table: one card per cards_required(...) unit of the whole
// heap; the final slot is reserved as a "guard" card (see commit below),
// so the last index usable for covered heap is one less.
76 _guard_index = cards_required(_whole_heap.word_size()) - 1;
77 _last_valid_index = _guard_index - 1;
78
79 _byte_map_size = compute_byte_map_size();
80
// Address range of the entire heap this card table must cover.
81 HeapWord* low_bound = _whole_heap.start();
82 HeapWord* high_bound = _whole_heap.end();
83
// Bookkeeping for the regions of the byte map that have been committed;
// nothing is covered yet. NOTE(review): the NULL check suggests this `new`
// is a non-throwing HotSpot CHeap allocation — confirm against the class's
// operator new.
84 _cur_covered_regions = 0;
85 _committed = new MemRegion[_max_covered_regions];
86 if (_committed == NULL) {
87 vm_exit_during_initialization("Could not allocate card table committed region set.");
88 }
89
// Reserve (but do not commit) virtual address space for the byte map.
// If the card-table page size is just the OS page size, no extra alignment
// is needed (0); otherwise align the reservation to the larger of the page
// size and the allocation granularity.
90 const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
91 MAX2(_page_size, (size_t) os::vm_allocation_granularity());
92 ReservedSpace heap_rs(_byte_map_size, rs_align, false);
93
// Tag the reservation as GC memory for Native Memory Tracking.
94 MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
95
// NOTE(review): heap_rs.base()/size() are used for NMT and page-size
// tracing before the is_reserved() check below — presumably benign when
// the reservation failed (base == NULL); confirm.
96 os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
97 _page_size, heap_rs.base(), heap_rs.size());
98 if (!heap_rs.is_reserved()) {
99 vm_exit_during_initialization("Could not reserve enough space for the "
100 "card marking array");
101 }
102
103 // The assembler store_check code will do an unsigned shift of the oop,
104 // then add it to byte_map_base, i.e.
105 //
106 // _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
107 _byte_map = (jbyte*) heap_rs.base();
108 byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
// Sanity: the biased base must map low_bound to slot 0 and keep the last
// heap word within the last valid (non-guard) slot.
109 assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
110 assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
111
// Commit only the page containing the guard card now (the rest of the map
// is committed lazily elsewhere), so stores to the guard card never fault.
112 jbyte* guard_card = &_byte_map[_guard_index];
113 uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
114 _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
115 os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
116 !ExecMem, "card table last card");
|