8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP
26 #define SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP
27
28 #include "memory/memRegion.hpp"
29 #include "memory/virtualspace.hpp"
30 #include "utilities/globalDefinitions.hpp"
31
32 // The CollectedHeap type requires subtypes to implement a method
33 // "block_start". For some subtypes, notably generational
34 // systems using card-table-based write barriers, the efficiency of this
35 // operation may be important. Implementations of the "BlockOffsetArray"
36 // class may be useful in providing such efficient implementations.
37 //
38 // BlockOffsetTable (abstract)
39 // - BlockOffsetArray (abstract)
40 // - BlockOffsetArrayNonContigSpace
41 // - BlockOffsetArrayContigSpace
42 //
43
44 class ContiguousSpace;
45
46 //////////////////////////////////////////////////////////////////////////
47 // The BlockOffsetTable "interface"
48 //////////////////////////////////////////////////////////////////////////
49 class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
50 friend class VMStructs;
  // Geometry of the table: the covered heap is divided into 2^LogN-byte
  // "cards", with one byte of _offset_array per card.
  enum SomePrivateConstants {
    LogN = 9,                             // log2 of the card size in bytes
    LogN_words = LogN - LogHeapWordSize,  // log2 of the card size in heap words
    N_bytes = 1 << LogN,                  // card size in bytes
    N_words = 1 << LogN_words             // card size in heap words
  };
115
116 bool _init_to_zero;
117
118 // The reserved region covered by the shared array.
119 MemRegion _reserved;
120
121 // End of the current committed region.
122 HeapWord* _end;
123
124 // Array for keeping offsets for retrieving object start fast given an
125 // address.
126 VirtualSpace _vs;
127 u_char* _offset_array; // byte array keeping backwards offsets
128
129 protected:
130 // Bounds checking accessors:
131 // For performance these have to devolve to array accesses in product builds.
132 u_char offset_array(size_t index) const {
133 assert(index < _vs.committed_size(), "index out of range");
134 return _offset_array[index];
135 }
136 // An assertion-checking helper method for the set_offset_array() methods below.
137 void check_reducing_assertion(bool reducing);
138
  // Store 'offset' into the table slot at 'index'.
  // When 'reducing' is true the caller guarantees the new value is no
  // larger than the value already stored (asserted below); see
  // check_reducing_assertion() for the legality check on 'reducing'.
  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }
145
146 void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
147 check_reducing_assertion(reducing);
148 assert(index < _vs.committed_size(), "index out of range");
149 assert(high >= low, "addresses out of order");
150 assert(pointer_delta(high, low) <= N_words, "offset too large");
151 assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
152 "Not reducing");
153 _offset_array[index] = (u_char)pointer_delta(high, low);
154 }
155
  // Fill the table slots for every card in the address range [left, right)
  // with 'offset'.  Assumes the range spans whole cards (the card count is
  // derived by shifting the word delta) — presumably callers pass
  // card-aligned bounds; verify against callers.
  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;

    // Below, we may use an explicit loop instead of memset()
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[index_for(left)], offset, num_cards);
    } else {
      size_t i = index_for(left);
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }
178
  // Fill the table slots for the inclusive index range [left, right]
  // with 'offset'.
  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    // Inclusive on both ends, hence the +1.
    size_t num_cards = right - left + 1;

    // Below, we may use an explicit loop instead of memset
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[left], offset, num_cards);
    } else {
      size_t i = left;
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }
200
  // Debug-only consistency check: slot 'index' must hold exactly the
  // word distance from 'low' to 'high'.  Compiles to nothing in product
  // builds (all statements are asserts).
  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }
208
209 bool is_card_boundary(HeapWord* p) const;
210
211 // Return the number of slots needed for an offset array
212 // that covers mem_region_words words.
213 // We always add an extra slot because if an object
214 // ends on a card boundary we put a 0 in the next
215 // offset array slot, so we want that slot always
216 // to be reserved.
217
218 size_t compute_size(size_t mem_region_words) {
|
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP
26 #define SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP
27
28 #include "gc/shared/memset_with_concurrent_readers.hpp"
29 #include "memory/memRegion.hpp"
30 #include "memory/virtualspace.hpp"
31 #include "runtime/globals.hpp"
32 #include "utilities/globalDefinitions.hpp"
33 #include "utilities/macros.hpp"
34
35 // The CollectedHeap type requires subtypes to implement a method
36 // "block_start". For some subtypes, notably generational
37 // systems using card-table-based write barriers, the efficiency of this
38 // operation may be important. Implementations of the "BlockOffsetArray"
39 // class may be useful in providing such efficient implementations.
40 //
41 // BlockOffsetTable (abstract)
42 // - BlockOffsetArray (abstract)
43 // - BlockOffsetArrayNonContigSpace
44 // - BlockOffsetArrayContigSpace
45 //
46
47 class ContiguousSpace;
48
49 //////////////////////////////////////////////////////////////////////////
50 // The BlockOffsetTable "interface"
51 //////////////////////////////////////////////////////////////////////////
52 class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
53 friend class VMStructs;
  // Geometry of the table: the covered heap is divided into 2^LogN-byte
  // "cards", with one byte of _offset_array per card.
  enum SomePrivateConstants {
    LogN = 9,                             // log2 of the card size in bytes
    LogN_words = LogN - LogHeapWordSize,  // log2 of the card size in heap words
    N_bytes = 1 << LogN,                  // card size in bytes
    N_words = 1 << LogN_words             // card size in heap words
  };
118
119 bool _init_to_zero;
120
121 // The reserved region covered by the shared array.
122 MemRegion _reserved;
123
124 // End of the current committed region.
125 HeapWord* _end;
126
127 // Array for keeping offsets for retrieving object start fast given an
128 // address.
129 VirtualSpace _vs;
130 u_char* _offset_array; // byte array keeping backwards offsets
131
132 void fill_range(size_t start, size_t num_cards, u_char offset) {
133 void* start_ptr = &_offset_array[start];
134 #if INCLUDE_ALL_GCS
135 // If collector is concurrent, special handling may be needed.
136 assert(!UseG1GC, "Shouldn't be here when using G1");
137 if (UseConcMarkSweepGC) {
138 memset_with_concurrent_readers(start_ptr, offset, num_cards);
139 return;
140 }
141 #endif // INCLUDE_ALL_GCS
142 memset(start_ptr, offset, num_cards);
143 }
144
145 protected:
146 // Bounds checking accessors:
147 // For performance these have to devolve to array accesses in product builds.
148 u_char offset_array(size_t index) const {
149 assert(index < _vs.committed_size(), "index out of range");
150 return _offset_array[index];
151 }
152 // An assertion-checking helper method for the set_offset_array() methods below.
153 void check_reducing_assertion(bool reducing);
154
  // Store 'offset' into the table slot at 'index'.
  // When 'reducing' is true the caller guarantees the new value is no
  // larger than the value already stored (asserted below); see
  // check_reducing_assertion() for the legality check on 'reducing'.
  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }
161
162 void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
163 check_reducing_assertion(reducing);
164 assert(index < _vs.committed_size(), "index out of range");
165 assert(high >= low, "addresses out of order");
166 assert(pointer_delta(high, low) <= N_words, "offset too large");
167 assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
168 "Not reducing");
169 _offset_array[index] = (u_char)pointer_delta(high, low);
170 }
171
172 void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
173 check_reducing_assertion(reducing);
174 assert(index_for(right - 1) < _vs.committed_size(),
175 "right address out of range");
176 assert(left < right, "Heap addresses out of order");
177 size_t num_cards = pointer_delta(right, left) >> LogN_words;
178
179 fill_range(index_for(left), num_cards, offset);
180 }
181
182 void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
183 check_reducing_assertion(reducing);
184 assert(right < _vs.committed_size(), "right address out of range");
185 assert(left <= right, "indexes out of order");
186 size_t num_cards = right - left + 1;
187
188 fill_range(left, num_cards, offset);
189 }
190
  // Debug-only consistency check: slot 'index' must hold exactly the
  // word distance from 'low' to 'high'.  Compiles to nothing in product
  // builds (all statements are asserts).
  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }
198
199 bool is_card_boundary(HeapWord* p) const;
200
201 // Return the number of slots needed for an offset array
202 // that covers mem_region_words words.
203 // We always add an extra slot because if an object
204 // ends on a card boundary we put a 0 in the next
205 // offset array slot, so we want that slot always
206 // to be reserved.
207
208 size_t compute_size(size_t mem_region_words) {
|