/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP
#define SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP

#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// BlockOffsetTable (abstract)
//   - BlockOffsetArray (abstract)
//     - BlockOffsetArrayNonContigSpace
//     - BlockOffsetArrayContigSpace
//

class ContiguousSpace;

class BOTConstants : public AllStatic {
public:
  static const uint LogN = 9;
  static const uint LogN_words = LogN - LogHeapWordSize;
  static const uint N_bytes = 1 << LogN;
  static const uint N_words = 1 << LogN_words;
  // entries "e" of at least N_words mean "go back by Base^(e-N_words)."
  // All entries are less than "N_words + N_powers".
  static const uint LogBase = 4;
  static const uint Base = (1 << LogBase);
  static const uint N_powers = 14;

  static size_t power_to_cards_back(uint i) {
    return (size_t)1 << (LogBase * i);
  }
  static size_t power_to_words_back(uint i) {
    return power_to_cards_back(i) * N_words;
  }
  static size_t entry_to_cards_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_cards_back(entry - N_words);
  }
  static size_t entry_to_words_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_words_back(entry - N_words);
  }
};
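
// For illustration (assuming a 64-bit VM, where LogHeapWordSize == 3, so
// N_words == 64 and a card covers N_bytes == 512 bytes):
//
//   BOTConstants::power_to_cards_back(0)  == 1      // 16^0 cards
//   BOTConstants::power_to_cards_back(2)  == 256    // 16^2 cards
//   BOTConstants::entry_to_cards_back(64) == 1      // entry == N_words + 0
//   BOTConstants::entry_to_cards_back(66) == 256    // entry == N_words + 2
//   BOTConstants::entry_to_words_back(66) == 16384  // 256 cards * 64 words
//
// An entry of at most N_words says "the block holding this card's first
// word starts that many words before the card boundary"; an entry of
// N_words + i says "jump back Base^i cards and look again". A reader keeps
// jumping back by the indicated amount until it reaches an entry <= N_words.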

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};

//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN"). An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place. (Consider,
// for example, the garbage-first generation.)

// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  bool _init_to_zero;

  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

  void fill_range(size_t start, size_t num_cards, u_char offset) {
    void* start_ptr = &_offset_array[start];
    // If collector is concurrent, special handling may be needed.
    G1GC_ONLY(assert(!UseG1GC, "Shouldn't be here when using G1");)
#if INCLUDE_CMSGC
    if (UseConcMarkSweepGC) {
      memset_with_concurrent_readers(start_ptr, offset, num_cards);
      return;
    }
#endif // INCLUDE_CMSGC
    memset(start_ptr, offset, num_cards);
  }

protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }
  // An assertion-checking helper method for the set_offset_array() methods below.
  void check_reducing_assertion(bool reducing);

  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= BOTConstants::N_words, "offset too large");
    assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
           "Not reducing");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> BOTConstants::LogN_words;

    fill_range(index_for(left), num_cards, offset);
  }

  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;

    fill_range(left, num_cards, offset);
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= BOTConstants::N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.

  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / BOTConstants::N_words) + 1;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
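
  // Worked example (assuming 64-bit HeapWords, so BOTConstants::N_words == 64):
  // covering a 1 GiB region means mem_region_words == 2^27, giving
  // number_of_slots == 2^27 / 64 + 1 == 2097153 one-byte entries, i.e. the
  // offset array costs about 1/512 of the covered space, rounded up to an
  // allocation-alignment boundary by allocation_align_size_up().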

public:
  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size". In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".) The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table. The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Whether entries should be initialized to zero. Used currently only for
  // error checking.
  void set_init_to_zero(bool val) { _init_to_zero = val; }
  bool init_to_zero() { return _init_to_zero; }

  // Updates all the BlockOffsetArray's sharing this shared array to
  // reflect the current "top"'s of their spaces.
  void update_offset_arrays();   // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;
};
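
// Conceptually, index_for() and address_for_index() above implement the
// card <-> address mapping sketched below (the actual definitions are not
// in this header); "p" must lie within the reserved region:
//
//   index_for(p)         ~ pointer_delta(p, _reserved.start()) >> BOTConstants::LogN_words
//   address_for_index(i) ~ _reserved.start() + (i << BOTConstants::LogN_words)
//
// so address_for_index(index_for(p)) is the first word of the card containing "p".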

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
  friend class VMStructs;
protected:
  // The following enums are used by do_block_internal() below
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  // The shared array, which is shared with other BlockOffsetArray's
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // An assertion-checking helper method for the set_remainder*() methods below.
  void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }

  // Sets the entries
  // corresponding to the cards starting at "start" and ending at "end"
  // to point back to the card before "start": the interval [start, end)
  // is right-open. The last parameter, reducing, indicates whether the
  // updates to individual entries always reduce the entry from a higher
  // to a lower value. (For example, this would hold true during a phase
  // in which only block splits were updating the BOT.)
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);
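
  // Shape of the entries written by the methods above, for a block starting
  // in card "c" and spanning many cards (illustrative; the exact split points
  // are an implementation detail, and Base == 16 back-skips are assumed):
  //
  //   card c                    : ordinary offset entry (<= N_words)
  //   cards just after c        : N_words + 0   (go back 16^0 == 1 card)
  //   a longer stretch of cards : N_words + 1   (go back 16^1 == 16 cards)
  //   an even longer stretch    : N_words + 2   (go back 16^2 == 256 cards)
  //   ...                       : at most N_words + N_powers - 1
  //
  // With this layout a block_start() walk needs only a logarithmic number
  // of card reads to reach the entry that records the true block start.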

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);

public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. If "init_to_zero" is true, the
  // elements of the array are initialized to zero. Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero_);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // verify that the old and new boundaries are also card boundaries
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // set all the newly added cards
      _array->set_offset_array(_end, new_end, BOTConstants::N_words);
    }
    _end = new_end;  // update _end
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size). All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering the said interval).
  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
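
  // For example (illustrative only; "bot", "p" and the 100-word size are
  // made up), after a space hands out a 100-word block starting at "p":
  //
  //   bot->alloc_block(p, p + 100);
  //   HeapWord* start = bot->block_start(p + 57);   // == p
  //
  // The guarantee holds until a later call (e.g. single_block()) re-records
  // that range.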

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }
  // Corresponding setter
  void set_init_to_zero(bool val) {
    _init_to_zero = val;
    assert(_array != NULL, "_array should be non-NULL");
    _array->set_init_to_zero(val);
  }

  // Debugging
  // Return the index of the last entry in the "active" region.
  virtual size_t last_active_index() const = 0;
  // Verify the block offset table
  void verify() const;
  void check_all_cards(size_t left_card, size_t right_card) const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a NonContiguousSpace, so that some
// specialized interfaces can be made available for spaces that
// manipulate the table.
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
  friend class VMStructs;
private:
  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

public:
  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, false),
    _unallocated_block(_bottom) { }

  // Accessor
  HeapWord* unallocated_block() const {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    return _unallocated_block;
  }

  void set_unallocated_block(HeapWord* block) {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    assert(block >= _bottom && block <= _end, "out of range");
    _unallocated_block = block;
  }

  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size). Only the first card
  // of BOT is touched. It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
  void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
    mark_block(blk, blk + size, reducing);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed. It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
    // Verify that the BOT shows [blk_start, blk_end) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  void allocated(HeapWord* blk, size_t size, bool reducing = false) {
    allocated(blk, blk + size, reducing);
  }

  void freed(HeapWord* blk_start, HeapWord* blk_end);
  void freed(HeapWord* blk, size_t size);

  HeapWord* block_start_unsafe(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // Verification & debugging: ensure that the offset table reflects
  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
  // is a single block of storage. NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
    PRODUCT_RETURN;
  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;

  // Verify that the given block is before _unallocated_block
  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
    const PRODUCT_RETURN;
  void verify_not_unallocated(HeapWord* blk, size_t size)
    const PRODUCT_RETURN;

  // Debugging support
  virtual size_t last_active_index() const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;
private:
  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;      // index corresponding to that boundary

  // Work function when allocation start crosses threshold.
  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);

public:
  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, true) {
    _next_offset_threshold = NULL;
    _next_offset_index = 0;
  }

  void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }

  // Initialize the threshold for an empty heap.
  HeapWord* initialize_threshold();
  // Zero out the entry for _bottom (offset will be zero)
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // In general, these methods expect to be called with
  // [blk_start, blk_end) representing a block of memory in the heap.
  // In this implementation, however, we are OK even if blk_start and/or
  // blk_end are NULL because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
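
  // For example (illustrative; "bot", "T" and the sizes are made up, and a
  // 512-byte card on a 64-bit VM is assumed), with the current threshold at
  // the card-aligned address T:
  //
  //   bot->alloc_block(T - 10, T - 4);   // below T: the table is not touched
  //   bot->alloc_block(T - 4,  T + 20);  // crosses T: alloc_block_work() runs,
  //                                      // recording the block and moving the
  //                                      // threshold forward
  //
  // so a contiguous space pays for BOT maintenance at most once per card
  // rather than on every allocation.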

  HeapWord* block_start_unsafe(const void* addr) const;

  // Debugging support
  virtual size_t last_active_index() const;
};

#endif // SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP