}

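// Allocate and commit the backing store for the marking bit map.
// One bit covers (1 << _shifter) heap words, so the map needs
// (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes, rounded up to
// the platform's allocation granularity.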
bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}

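// Clear every bit in the map; the backing store remains committed.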
void CMBitMap::clearAll() {
  _bm.clear();
  return;
}

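// Mark the bits covering the heap words in mr. The region is clipped
// to the portion of the heap covered by this bit map, must be
// non-empty, and must end on a card-aligned boundary.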
void CMBitMap::markRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}