136 friend class BlockOffsetArrayNonContigSpace;
137 friend class BlockOffsetArrayContigSpace;
138 friend class VMStructs;
139
140 private:
141 bool _init_to_zero; // presumably: whether newly committed offset-array memory starts zeroed -- confirm at the initialization/commit site
142
143 // The reserved region covered by the shared array.
144 MemRegion _reserved;
145
146 // End of the current committed region.
147 HeapWord* _end;
148
149 // Array for keeping offsets for retrieving object start fast given an
150 // address.
151 VirtualSpace _vs; // backing storage for _offset_array; bounds checks below use _vs.committed_size()
152 u_char* _offset_array; // byte array keeping backwards offsets
153
154 void fill_range(size_t start, size_t num_cards, u_char offset) { // set num_cards consecutive entries, beginning at 'start', to 'offset'
155 void* start_ptr = &_offset_array[start];
156 #if INCLUDE_ALL_GCS
157 // If collector is concurrent, special handling may be needed.
158 assert(!UseG1GC, "Shouldn't be here when using G1"); // callers must not reach this path under G1
159 if (UseConcMarkSweepGC) {
160 memset_with_concurrent_readers(start_ptr, offset, num_cards); // CMS: fill safely while the array may be read concurrently
161 return;
162 }
163 #endif // INCLUDE_ALL_GCS
164 memset(start_ptr, offset, num_cards); // no concurrent readers expected: plain memset suffices
165 }
166
167 protected:
168 // Bounds checking accessors:
169 // For performance these have to devolve to array accesses in product builds.
170 u_char offset_array(size_t index) const { // read entry 'index'; range-checked in debug builds only
171 assert(index < _vs.committed_size(), "index out of range");
172 return _offset_array[index];
173 }
174 // An assertion-checking helper method for the set_offset_array() methods below.
175 void check_reducing_assertion(bool reducing);
176
177 void set_offset_array(size_t index, u_char offset, bool reducing = false) { // write entry; 'reducing' asserts the stored value only decreases
178 check_reducing_assertion(reducing);
179 assert(index < _vs.committed_size(), "index out of range");
180 assert(!reducing || _offset_array[index] >= offset, "Not reducing"); // a reducing update may never raise the stored offset
181 _offset_array[index] = offset;
182 }
183
|
136 friend class BlockOffsetArrayNonContigSpace;
137 friend class BlockOffsetArrayContigSpace;
138 friend class VMStructs;
139
140 private:
141 bool _init_to_zero; // presumably: whether newly committed offset-array memory starts zeroed -- confirm at the initialization/commit site
142
143 // The reserved region covered by the shared array.
144 MemRegion _reserved;
145
146 // End of the current committed region.
147 HeapWord* _end;
148
149 // Array for keeping offsets for retrieving object start fast given an
150 // address.
151 VirtualSpace _vs; // backing storage for _offset_array; bounds checks below use _vs.committed_size()
152 u_char* _offset_array; // byte array keeping backwards offsets
153
154 void fill_range(size_t start, size_t num_cards, u_char offset) { // set num_cards consecutive entries, beginning at 'start', to 'offset'
155 void* start_ptr = &_offset_array[start];
156 // If collector is concurrent, special handling may be needed.
157 G1GC_ONLY(assert(!UseG1GC, "Shouldn't be here when using G1");) // check compiled in only when G1 support is built
158 #if INCLUDE_CMSGC
159 if (UseConcMarkSweepGC) {
160 memset_with_concurrent_readers(start_ptr, offset, num_cards); // CMS: fill safely while the array may be read concurrently
161 return;
162 }
163 #endif // INCLUDE_CMSGC
164 memset(start_ptr, offset, num_cards); // no concurrent readers expected: plain memset suffices
165 }
166
167 protected:
168 // Bounds checking accessors:
169 // For performance these have to devolve to array accesses in product builds.
170 u_char offset_array(size_t index) const { // read entry 'index'; range-checked in debug builds only
171 assert(index < _vs.committed_size(), "index out of range");
172 return _offset_array[index];
173 }
174 // An assertion-checking helper method for the set_offset_array() methods below.
175 void check_reducing_assertion(bool reducing);
176
177 void set_offset_array(size_t index, u_char offset, bool reducing = false) { // write entry; 'reducing' asserts the stored value only decreases
178 check_reducing_assertion(reducing);
179 assert(index < _vs.committed_size(), "index out of range");
180 assert(!reducing || _offset_array[index] >= offset, "Not reducing"); // a reducing update may never raise the stored offset
181 _offset_array[index] = offset;
182 }
183
|