 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP

#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// While generally mirroring the structure of the BOT for GenCollectedHeap,
// the following types are tailored more towards G1's uses; these should,
// however, be merged back into a common BOT to avoid code duplication
// and reduce maintenance overhead.
//
// G1BlockOffsetTable (abstract)
//   -- G1BlockOffsetArray        (uses G1BlockOffsetSharedArray)
//        -- G1BlockOffsetArrayContigSpace
//
// A main impediment to the consolidation of this code might be the

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block. (May have side effects, namely updating of
  // shared array entries that "point" too far backwards. This can occur,
  // for example, when LAB allocation is used in a space covered by the
  // table.)
  virtual HeapWord* block_start_unsafe(const void* addr) = 0;
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else NULL if it is covered by no block. (May have side effects,
  // namely updating of shared array entries that "point" too far
  // backwards. This can occur, for example, when LAB allocation is used
  // in a space covered by the table.)
  inline HeapWord* block_start(const void* addr);
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  inline HeapWord* block_start_const(const void* addr) const;
};

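// For illustration only, a hypothetical caller of the interface above
// ("table" and "interior" are made-up names, not part of this header):
//
//   G1BlockOffsetTable* table = ...;  // some concrete subtype
//   HeapWord* start  = table->block_start(interior);        // may update entries
//   HeapWord* cstart = table->block_start_const(interior);  // no side effects
//   assert(start <= (HeapWord*)interior, "block starts at or before addr");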
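
// Listener registered with the backing G1RegionToSpaceMapper so the table
// is notified when storage for parts of it is committed.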
class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
 public:
  virtual void on_commit(uint start_idx, size_t num_regions);
};

// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN"). An array with an entry
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful when a
// single resizable area (such as a generation) is divided up into several
// spaces in which contiguous allocation takes place (such as, for
// example, in G1 or in the train generation).

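// As a worked example of the encoding (a sketch, assuming a 64-bit VM where
// a heap word is 8 bytes): with LogN = 9 below, each array entry covers
// 2^9 = 512 bytes = 64 heap words. If the block containing the first word
// of subregion i starts 10 words before that subregion's boundary, then
//
//   _offset_array[i] == 10
//
// and a lookup for any address in subregion i walks back 10 words from the
// subregion boundary to reach the block start.
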
// Here is the shared array type.

class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class G1BlockOffsetArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  G1BlockOffsetSharedArrayMappingChangedListener _listener;
  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array of offsets used to retrieve object starts quickly, given an
  // address.
  u_char* _offset_array;  // byte array keeping backwards offsets

  void check_offset(size_t offset, const char* msg) const {
    assert(offset <= N_words,
           err_msg("%s - "
                   "offset: " SIZE_FORMAT ", N_words: %u",
                   msg, offset, (uint)N_words));
  }

  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  inline u_char offset_array(size_t index) const;

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);

  void set_offset_array_raw(size_t index, u_char offset) {
    _offset_array[index] = offset;
  }

  inline void set_offset_array(size_t index, u_char offset);

  inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);

  inline void set_offset_array(size_t left, size_t right, u_char offset);

  inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;

  bool is_card_boundary(HeapWord* p) const;

public:

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  static size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = mem_region_words / N_words;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
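
  // Worked sizing example (illustrative arithmetic only, assuming a 64-bit
  // VM, where N_words == 64): a 1 GB covered region is 2^27 heap words, so
  //
  //   compute_size(1 << 27)  // (1 << 27) / 64 == 1 << 21 slots
  //
  // i.e. about 2 MB of u_char entries, before alignment.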

  enum SomePublicConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };

  // Initialize the table to cover the heap region "heap", using "storage"
  // as the backing store for the offset array. The contents of the initial
  // table are undefined; it is the responsibility of the constituent
  // G1BlockOffsetTable(s) to initialize cards.
  G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);

  void set_bottom(HeapWord* new_bottom);

  // Return the appropriate index into "_offset_array" for "p".
  inline size_t index_for(const void* p) const;
  inline size_t index_for_raw(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  inline HeapWord* address_for_index(size_t index) const;
  // Variant of address_for_index that does not check the index for validity.
  inline HeapWord* address_for_index_raw(size_t index) const {
    return _reserved.start() + (index << LogN_words);
  }
};
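
// For illustration only ("bot" and "p" are made-up names): index_for and
// address_for_index invert each other up to subregion granularity:
//
//   size_t i = bot->index_for(p);
//   HeapWord* base = bot->address_for_index(i);
//   // base is the start of the 2^LogN-byte subregion containing p:
//   assert(base <= (HeapWord*)p &&
//          (HeapWord*)p < base + G1BlockOffsetSharedArray::N_words,
//          "round trip");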

// And here is the G1BlockOffsetTable subtype that uses the array.

class G1BlockOffsetArray: public G1BlockOffsetTable {
  friend class G1BlockOffsetSharedArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;
private:
  enum SomePrivateConstants {
    N_words = G1BlockOffsetSharedArray::N_words,
    LogN = G1BlockOffsetSharedArray::LogN
  };

  // The following enums are used by do_block_helper.
  enum Action {
    Action_single,  // BOT records a single block (see single_block())
    Action_mark,    // BOT marks the start of a block (see mark_block())
    Action_check    // Check that BOT records block correctly
                    // (see verify_single_block()).
  };

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};

// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
  friend class VMStructs;

  // Allocation boundary at which the offset array must be updated.
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;  // index corresponding to that boundary

  // Work function to be called when allocation start crosses the next
  // threshold in the contig space.
  void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
    alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
                      blk_start, blk_end);
  }

  // Variant of zero_bottom_entry that does not check for availability of the
  // memory first.
  void zero_bottom_entry_raw();
  // Variant of initialize_threshold that does not check for availability of the
  // memory first.
  HeapWord* initialize_threshold_raw();
  // Zero out the entry for _bottom (offset will be zero).
  void zero_bottom_entry();
public:
  G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);

  // Initialize the threshold to reflect the first boundary after the
  // bottom of the covered region.
  HeapWord* initialize_threshold();

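  // Reset the table to its state right after construction: zero the entry
  // for the bottom of the covered region and re-establish the first update
  // threshold. The _raw variants do not check for availability of the
  // backing memory, so callers must ensure it is already committed.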
  void reset_bot() {
    zero_bottom_entry_raw();
    initialize_threshold_raw();
  }

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for the second version) is NULL. In this
  // implementation, that's true because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work1(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
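
  // For illustration only, a hypothetical bump-pointer allocation path in
  // the covered space ("cur", "word_size" and "bot" are made-up names):
  //
  //   HeapWord* obj = cur;
  //   cur += word_size;
  //   bot->alloc_block(obj, word_size);  // no-op unless [obj, obj + word_size)
  //                                      // crosses _next_offset_threshold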

  HeapWord* block_start_unsafe(const void* addr);
  HeapWord* block_start_unsafe_const(const void* addr) const;