/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP

#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational systems using
// card-table-based write barriers, the efficiency of this operation may be
// important. Implementations of the "BlockOffsetArray" class may be useful
// in providing such an efficient implementation; the sketch after this
// comment block illustrates the intended use.
//
// While generally mirroring the structure of the BOT for GenCollectedHeap,
// the following types are tailored more towards G1's uses; these should,
// however, be merged back into a common BOT to avoid code duplication
// and reduce maintenance overhead.
//
// G1BlockOffsetTable (abstract)
// -- G1BlockOffsetArray (uses G1BlockOffsetSharedArray)
//    -- G1BlockOffsetArrayContigSpace
//
// A main impediment to the consolidation of this code might be the
// effect of making some of the block_start*() calls non-const as
// below. Whether that might adversely affect performance optimizations
// that compilers might normally perform in the case of non-G1
// collectors needs to be carefully investigated prior to any such
// consolidation.
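// As an illustration of why an efficient "block_start" matters (this sketch is
// editorial, not part of the original header: the names "bot" and
// "card_region" are placeholders, and the real scanning logic lives in G1's
// remembered-set code), consider scanning the objects that intersect a dirty
// card. The scan has to locate the first object overlapping the card before it
// can walk the card's contents:
//
//   HeapWord* cur = bot->block_start(card_region.start());  // may precede the card
//   while (cur < card_region.end()) {
//     oop obj = oop(cur);          // the block starting at "cur" is an object
//     // ... process the part of "obj" that intersects "card_region" ...
//     cur += obj->size();          // advance to the next block (size in words)
//   }
//
// Without an offset table, finding that first object would require walking the
// covered space from its bottom.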
// Forward declarations
class G1BlockOffsetSharedArray;
class G1OffsetTableContigSpace;

class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
    _bottom(bottom), _end(end)
  {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end,
           err_msg("new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
                   p2i(new_bottom), p2i(_end)));
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block. (May have side effects, namely updating of
  // shared array entries that "point" too far backwards. This can occur,
  // for example, when LAB allocation is used in a space covered by the
  // table.)
  virtual HeapWord* block_start_unsafe(const void* addr) = 0;
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else NULL if "addr" is covered by no block. (May have side effects,
  // namely updating of shared array entries that "point" too far
  // backwards. This can occur, for example, when LAB allocation is used
  // in a space covered by the table.)
  inline HeapWord* block_start(const void* addr);
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  inline HeapWord* block_start_const(const void* addr) const;
};

class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
public:
  virtual void on_commit(uint start_idx, size_t num_regions) {
    // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
    // retrieve it here since this would cause firing of several asserts. The code
    // executed after commit of a region already needs to do some re-initialization of
    // the HeapRegion, so we combine the BOT re-initialization with that.
  }
};

// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN"). An array with an entry
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place, such as,
// for example, in G1 or in the train generation.
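// The encoding above is plain shift arithmetic. The helper below is purely
// illustrative (it is not used by the collector; the function name and
// parameters are invented for this sketch). It mirrors the mapping that
// G1BlockOffsetSharedArray::index_for_raw() performs, assuming the usual
// LogN == 9, i.e. 512-byte cards.
inline size_t example_bot_index_for(const void* addr,
                                    const HeapWord* reserved_start,
                                    uint log_n = 9) {
  // The byte distance from the start of the covered region, divided by the
  // card size in bytes (2^log_n), selects the slot in the byte array of
  // backwards offsets.
  return pointer_delta(addr, reserved_start, sizeof(char)) >> log_n;
}
// The reverse mapping, as in address_for_index_raw(), is
//   reserved_start + (index << LogN_words)
// where LogN_words == LogN - LogHeapWordSize, i.e. a 512-byte card holds
// 64 words on a 64-bit VM.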
// Here is the shared array type.

class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class G1BlockOffsetArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  G1BlockOffsetSharedArrayMappingChangedListener _listener;
  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  u_char* _offset_array;  // byte array keeping backwards offsets

  void check_offset(size_t offset, const char* msg) const {
    assert(offset <= N_words,
           err_msg("%s - "
                   "offset: " SIZE_FORMAT ", N_words: %u",
                   msg, offset, (uint)N_words));
  }

  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  inline u_char offset_array(size_t index) const;

  void set_offset_array_raw(size_t index, u_char offset) {
    _offset_array[index] = offset;
  }

  inline void set_offset_array(size_t index, u_char offset);

  inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);

  inline void set_offset_array(size_t left, size_t right, u_char offset);

  bool is_card_boundary(HeapWord* p) const;

public:

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  static size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words);
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }

  enum SomePublicConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };

  // Initialize the table to cover the reserved region "heap", using "storage"
  // as the backing store for the offset array. The covered region may later
  // be expanded (see "resize" in the owning tables) up to the size of the
  // reserved region. The contents of the initial table are undefined; it is
  // the responsibility of the constituent G1BlockOffsetTable(s) to
  // initialize cards.
  G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);

  // Return the appropriate index into "_offset_array" for "p".
  inline size_t index_for(const void* p) const;
  inline size_t index_for_raw(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  inline HeapWord* address_for_index(size_t index) const;
  // Variant of address_for_index that does not check the index for validity.
  inline HeapWord* address_for_index_raw(size_t index) const {
    return _reserved.start() + (index << LogN_words);
  }
};
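// How these types are intended to fit together (an illustrative sketch, not
// code from this file; the local names are invented, and in G1 the per-space
// table is embedded in the HeapRegion rather than allocated like this):
//
//   // One shared array covering the whole reserved heap, backed by "storage":
//   G1BlockOffsetSharedArray* shared = new G1BlockOffsetSharedArray(heap_mr, storage);
//   // One table per contiguous space, all indexing into the same backing array:
//   G1BlockOffsetArrayContigSpace bot(shared, region_mr);
//   bot.set_space(space);   // "space" is the owning G1OffsetTableContigSpace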
// And here is the G1BlockOffsetTable subtype that uses the array.

class G1BlockOffsetArray: public G1BlockOffsetTable {
  friend class G1BlockOffsetSharedArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;
private:
  enum SomePrivateConstants {
    N_words = G1BlockOffsetSharedArray::N_words,
    LogN = G1BlockOffsetSharedArray::LogN
  };

  // This is the array, which can be shared by several BlockOffsetArrays
  // servicing different spaces within the covered region.
  G1BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  G1OffsetTableContigSpace* _gsp;

  // The portion [_unallocated_block, _gsp->end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the interval
  // [start, end) is right-open.
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end);

protected:

  G1OffsetTableContigSpace* gsp() const { return _gsp; }

  inline size_t block_size(const HeapWord* p) const;

  // Returns the address of a block whose start is at most "addr".
  // If "has_max_index" is true, assumes that "max_index" is the last valid
  // index in the offset array.
  inline HeapWord* block_at_or_preceding(const void* addr,
                                         bool has_max_index,
                                         size_t max_index) const;

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.) Return the address of the
  // beginning of the block that contains "addr". Does so without side
  // effects (see, e.g., the spec of block_start.)
  inline HeapWord*
  forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                         const void* addr) const;

  // "q" is a block boundary that is <= "addr"; return the address of the
  // beginning of the block that contains "addr". May have side effects
  // on "this", by updating imprecise entries.
  inline HeapWord* forward_to_block_containing_addr(HeapWord* q,
                                                    const void* addr);

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.) Return the address of the
  // beginning of the block that contains "addr". May have side effects
  // on "this", by updating imprecise entries.
  HeapWord* forward_to_block_containing_addr_slow(HeapWord* q,
                                                  HeapWord* n,
                                                  const void* addr);

  // Requires that "*threshold_" be the first array entry boundary at or
  // above "blk_start", and that "*index_" be the corresponding array
  // index. If the block starts at or crosses "*threshold_", records
  // "blk_start" as the appropriate block start for the array index
  // starting at "*threshold_", and for any other indices crossed by the
  // block. Updates "*threshold_" and "*index_" to correspond to the first
  // index after the block end.
  void alloc_block_work2(HeapWord** threshold_, size_t* index_,
                         HeapWord* blk_start, HeapWord* blk_end);

public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. The elements of the array are
  // initialized to zero.
  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(G1OffsetTableContigSpace* sp);

  // Resets the covered region to one with the same _bottom as before but
  // the given "new_word_size".
  void resize(size_t new_word_size);

  virtual HeapWord* block_start_unsafe(const void* addr);
  virtual HeapWord* block_start_unsafe_const(const void* addr) const;

  // Used by region verification. Checks that the contents of the
  // BOT reflect that there's a single object that spans the address
  // range [obj_start, obj_start + word_size); returns true if this is
  // the case, returns false if it's not.
  bool verify_for_object(HeapWord* obj_start, size_t word_size) const;

  void check_all_cards(size_t left_card, size_t right_card) const;

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};
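// The lookup performed by block_start_unsafe() can be pictured as follows (an
// editorial sketch, not code from this class; member accesses are simplified
// and the back-skip used for entries that span many cards is glossed over):
//
//   size_t index = _array->index_for(addr);           // card containing "addr"
//   HeapWord* q  = _array->address_for_index(index);  // that card's boundary
//   q -= _array->offset_array(index);                 // step back by the recorded offset
//   // (block_at_or_preceding() repeats the step above while the entry still
//   // refers to an earlier card rather than to a block start.)
//   // Then walk forward, block by block, to the block containing "addr":
//   return forward_to_block_containing_addr(q, addr);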
// A subtype of G1BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
  friend class VMStructs;

  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;      // index corresponding to that boundary

  // Work function to be called when allocation start crosses the next
  // threshold in the contig space.
  void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
    alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
                      blk_start, blk_end);
  }

  // Variant of zero_bottom_entry that does not check for availability of the
  // memory first.
  void zero_bottom_entry_raw();
  // Variant of initialize_threshold that does not check for availability of the
  // memory first.
  HeapWord* initialize_threshold_raw();
public:
  G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);

  // Initialize the threshold to reflect the first boundary after the
  // bottom of the covered region.
  HeapWord* initialize_threshold();

  void reset_bot() {
    zero_bottom_entry_raw();
    initialize_threshold_raw();
  }

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL". In this
  // implementation, that's true because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work1(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  HeapWord* block_start_unsafe(const void* addr);
  HeapWord* block_start_unsafe_const(const void* addr) const;

  void set_for_starts_humongous(HeapWord* new_top);

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP