1 /* 2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 // The CollectedHeap type requires subtypes to implement a method 26 // "block_start". For some subtypes, notably generational 27 // systems using card-table-based write barriers, the efficiency of this 28 // operation may be important. Implementations of the "BlockOffsetArray" 29 // class may be useful in providing such efficient implementations. 30 // 31 // BlockOffsetTable (abstract) 32 // - BlockOffsetArray (abstract) 33 // - BlockOffsetArrayNonContigSpace 34 // - BlockOffsetArrayContigSpace 35 // 36 37 class ContiguousSpace; 38 class SerializeOopClosure; 39 40 ////////////////////////////////////////////////////////////////////////// 41 // The BlockOffsetTable "interface" 42 ////////////////////////////////////////////////////////////////////////// 43 class BlockOffsetTable VALUE_OBJ_CLASS_SPEC { 44 friend class VMStructs; 45 protected: 46 // These members describe the region covered by the table. 47 48 // The space this table is covering. 49 HeapWord* _bottom; // == reserved.start 50 HeapWord* _end; // End of currently allocated region. 51 52 public: 53 // Initialize the table to cover the given space. 54 // The contents of the initial table are undefined. 55 BlockOffsetTable(HeapWord* bottom, HeapWord* end): 56 _bottom(bottom), _end(end) { 57 assert(_bottom <= _end, "arguments out of order"); 58 } 59 60 // Note that the committed size of the covered space may have changed, 61 // so the table size might also wish to change. 62 virtual void resize(size_t new_word_size) = 0; 63 64 virtual void set_bottom(HeapWord* new_bottom) { 65 assert(new_bottom <= _end, "new_bottom > _end"); 66 _bottom = new_bottom; 67 resize(pointer_delta(_end, _bottom)); 68 } 69 70 // Requires "addr" to be contained by a block, and returns the address of 71 // the start of that block. 72 virtual HeapWord* block_start_unsafe(const void* addr) const = 0; 73 74 // Returns the address of the start of the block containing "addr", or 75 // else "null" if it is covered by no block. 76 HeapWord* block_start(const void* addr) const; 77 }; 78 79 ////////////////////////////////////////////////////////////////////////// 80 // One implementation of "BlockOffsetTable," the BlockOffsetArray, 81 // divides the covered region into "N"-word subregions (where 82 // "N" = 2^"LogN". An array with an entry for each such subregion 83 // indicates how far back one must go to find the start of the 84 // chunk that includes the first word of the subregion. 85 // 86 // Each BlockOffsetArray is owned by a Space. However, the actual array 87 // may be shared by several BlockOffsetArrays; this is useful 88 // when a single resizable area (such as a generation) is divided up into 89 // several spaces in which contiguous allocation takes place. (Consider, 90 // for example, the garbage-first generation.) 91 92 // Here is the shared array type. 93 ////////////////////////////////////////////////////////////////////////// 94 // BlockOffsetSharedArray 95 ////////////////////////////////////////////////////////////////////////// 96 class BlockOffsetSharedArray: public CHeapObj { 97 friend class BlockOffsetArray; 98 friend class BlockOffsetArrayNonContigSpace; 99 friend class BlockOffsetArrayContigSpace; 100 friend class VMStructs; 101 102 private: 103 enum SomePrivateConstants { 104 LogN = 9, 105 LogN_words = LogN - LogHeapWordSize, 106 N_bytes = 1 << LogN, 107 N_words = 1 << LogN_words 108 }; 109 110 bool _init_to_zero; 111 112 // The reserved region covered by the shared array. 113 MemRegion _reserved; 114 115 // End of the current committed region. 116 HeapWord* _end; 117 118 // Array for keeping offsets for retrieving object start fast given an 119 // address. 120 VirtualSpace _vs; 121 u_char* _offset_array; // byte array keeping backwards offsets 122 123 protected: 124 // Bounds checking accessors: 125 // For performance these have to devolve to array accesses in product builds. 126 u_char offset_array(size_t index) const { 127 assert(index < _vs.committed_size(), "index out of range"); 128 return _offset_array[index]; 129 } 130 // An assertion-checking helper method for the set_offset_array() methods below. 131 void check_reducing_assertion(bool reducing); 132 133 void set_offset_array(size_t index, u_char offset, bool reducing = false) { 134 check_reducing_assertion(reducing); 135 assert(index < _vs.committed_size(), "index out of range"); 136 assert(!reducing || _offset_array[index] >= offset, "Not reducing"); 137 _offset_array[index] = offset; 138 } 139 140 void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) { 141 check_reducing_assertion(reducing); 142 assert(index < _vs.committed_size(), "index out of range"); 143 assert(high >= low, "addresses out of order"); 144 assert(pointer_delta(high, low) <= N_words, "offset too large"); 145 assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low), 146 "Not reducing"); 147 _offset_array[index] = (u_char)pointer_delta(high, low); 148 } 149 150 void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) { 151 check_reducing_assertion(reducing); 152 assert(index_for(right - 1) < _vs.committed_size(), 153 "right address out of range"); 154 assert(left < right, "Heap addresses out of order"); 155 size_t num_cards = pointer_delta(right, left) >> LogN_words; 156 157 // Below, we may use an explicit loop instead of memset() 158 // because on certain platforms memset() can give concurrent 159 // readers "out-of-thin-air," phantom zeros; see 6948537. 160 if (UseMemSetInBOT) { 161 memset(&_offset_array[index_for(left)], offset, num_cards); 162 } else { 163 size_t i = index_for(left); 164 const size_t end = i + num_cards; 165 for (; i < end; i++) { 166 // Elided until CR 6977974 is fixed properly. 167 // assert(!reducing || _offset_array[i] >= offset, "Not reducing"); 168 _offset_array[i] = offset; 169 } 170 } 171 } 172 173 void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) { 174 check_reducing_assertion(reducing); 175 assert(right < _vs.committed_size(), "right address out of range"); 176 assert(left <= right, "indexes out of order"); 177 size_t num_cards = right - left + 1; 178 179 // Below, we may use an explicit loop instead of memset 180 // because on certain platforms memset() can give concurrent 181 // readers "out-of-thin-air," phantom zeros; see 6948537. 182 if (UseMemSetInBOT) { 183 memset(&_offset_array[left], offset, num_cards); 184 } else { 185 size_t i = left; 186 const size_t end = i + num_cards; 187 for (; i < end; i++) { 188 // Elided until CR 6977974 is fixed properly. 189 // assert(!reducing || _offset_array[i] >= offset, "Not reducing"); 190 _offset_array[i] = offset; 191 } 192 } 193 } 194 195 void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const { 196 assert(index < _vs.committed_size(), "index out of range"); 197 assert(high >= low, "addresses out of order"); 198 assert(pointer_delta(high, low) <= N_words, "offset too large"); 199 assert(_offset_array[index] == pointer_delta(high, low), 200 "Wrong offset"); 201 } 202 203 bool is_card_boundary(HeapWord* p) const; 204 205 // Return the number of slots needed for an offset array 206 // that covers mem_region_words words. 207 // We always add an extra slot because if an object 208 // ends on a card boundary we put a 0 in the next 209 // offset array slot, so we want that slot always 210 // to be reserved. 211 212 size_t compute_size(size_t mem_region_words) { 213 size_t number_of_slots = (mem_region_words / N_words) + 1; 214 return ReservedSpace::allocation_align_size_up(number_of_slots); 215 } 216 217 public: 218 // Initialize the table to cover from "base" to (at least) 219 // "base + init_word_size". In the future, the table may be expanded 220 // (see "resize" below) up to the size of "_reserved" (which must be at 221 // least "init_word_size".) The contents of the initial table are 222 // undefined; it is the responsibility of the constituent 223 // BlockOffsetTable(s) to initialize cards. 224 BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size); 225 226 // Notes a change in the committed size of the region covered by the 227 // table. The "new_word_size" may not be larger than the size of the 228 // reserved region this table covers. 229 void resize(size_t new_word_size); 230 231 void set_bottom(HeapWord* new_bottom); 232 233 // Whether entries should be initialized to zero. Used currently only for 234 // error checking. 235 void set_init_to_zero(bool val) { _init_to_zero = val; } 236 bool init_to_zero() { return _init_to_zero; } 237 238 // Updates all the BlockOffsetArray's sharing this shared array to 239 // reflect the current "top"'s of their spaces. 240 void update_offset_arrays(); // Not yet implemented! 241 242 // Return the appropriate index into "_offset_array" for "p". 243 size_t index_for(const void* p) const; 244 245 // Return the address indicating the start of the region corresponding to 246 // "index" in "_offset_array". 247 HeapWord* address_for_index(size_t index) const; 248 249 // Return the address "p" incremented by the size of 250 // a region. This method does not align the address 251 // returned to the start of a region. It is a simple 252 // primitive. 253 HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; } 254 255 // Shared space support 256 void serialize(SerializeOopClosure* soc, HeapWord* start, HeapWord* end); 257 }; 258 259 ////////////////////////////////////////////////////////////////////////// 260 // The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray. 261 ////////////////////////////////////////////////////////////////////////// 262 class BlockOffsetArray: public BlockOffsetTable { 263 friend class VMStructs; 264 friend class G1BlockOffsetArray; // temp. until we restructure and cleanup 265 protected: 266 // The following enums are used by do_block_internal() below 267 enum Action { 268 Action_single, // BOT records a single block (see single_block()) 269 Action_mark, // BOT marks the start of a block (see mark_block()) 270 Action_check // Check that BOT records block correctly 271 // (see verify_single_block()). 272 }; 273 274 enum SomePrivateConstants { 275 N_words = BlockOffsetSharedArray::N_words, 276 LogN = BlockOffsetSharedArray::LogN, 277 // entries "e" of at least N_words mean "go back by Base^(e-N_words)." 278 // All entries are less than "N_words + N_powers". 279 LogBase = 4, 280 Base = (1 << LogBase), 281 N_powers = 14 282 }; 283 284 static size_t power_to_cards_back(uint i) { 285 return (size_t)(1 << (LogBase * i)); 286 } 287 static size_t power_to_words_back(uint i) { 288 return power_to_cards_back(i) * N_words; 289 } 290 static size_t entry_to_cards_back(u_char entry) { 291 assert(entry >= N_words, "Precondition"); 292 return power_to_cards_back(entry - N_words); 293 } 294 static size_t entry_to_words_back(u_char entry) { 295 assert(entry >= N_words, "Precondition"); 296 return power_to_words_back(entry - N_words); 297 } 298 299 // The shared array, which is shared with other BlockOffsetArray's 300 // corresponding to different spaces within a generation or span of 301 // memory. 302 BlockOffsetSharedArray* _array; 303 304 // The space that owns this subregion. 305 Space* _sp; 306 307 // If true, array entries are initialized to 0; otherwise, they are 308 // initialized to point backwards to the beginning of the covered region. 309 bool _init_to_zero; 310 311 // An assertion-checking helper method for the set_remainder*() methods below. 312 void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); } 313 314 // Sets the entries 315 // corresponding to the cards starting at "start" and ending at "end" 316 // to point back to the card before "start": the interval [start, end) 317 // is right-open. The last parameter, reducing, indicates whether the 318 // updates to individual entries always reduce the entry from a higher 319 // to a lower value. (For example this would hold true during a temporal 320 // regime during which only block splits were updating the BOT. 321 void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false); 322 // Same as above, except that the args here are a card _index_ interval 323 // that is closed: [start_index, end_index] 324 void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false); 325 326 // A helper function for BOT adjustment/verification work 327 void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false); 328 329 public: 330 // The space may not have its bottom and top set yet, which is why the 331 // region is passed as a parameter. If "init_to_zero" is true, the 332 // elements of the array are initialized to zero. Otherwise, they are 333 // initialized to point backwards to the beginning. 334 BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr, 335 bool init_to_zero_); 336 337 // Note: this ought to be part of the constructor, but that would require 338 // "this" to be passed as a parameter to a member constructor for 339 // the containing concrete subtype of Space. 340 // This would be legal C++, but MS VC++ doesn't allow it. 341 void set_space(Space* sp) { _sp = sp; } 342 343 // Resets the covered region to the given "mr". 344 void set_region(MemRegion mr) { 345 _bottom = mr.start(); 346 _end = mr.end(); 347 } 348 349 // Note that the committed size of the covered space may have changed, 350 // so the table size might also wish to change. 351 virtual void resize(size_t new_word_size) { 352 HeapWord* new_end = _bottom + new_word_size; 353 if (_end < new_end && !init_to_zero()) { 354 // verify that the old and new boundaries are also card boundaries 355 assert(_array->is_card_boundary(_end), 356 "_end not a card boundary"); 357 assert(_array->is_card_boundary(new_end), 358 "new _end would not be a card boundary"); 359 // set all the newly added cards 360 _array->set_offset_array(_end, new_end, N_words); 361 } 362 _end = new_end; // update _end 363 } 364 365 // Adjust the BOT to show that it has a single block in the 366 // range [blk_start, blk_start + size). All necessary BOT 367 // cards are adjusted, but _unallocated_block isn't. 368 void single_block(HeapWord* blk_start, HeapWord* blk_end); 369 void single_block(HeapWord* blk, size_t size) { 370 single_block(blk, blk + size); 371 } 372 373 // When the alloc_block() call returns, the block offset table should 374 // have enough information such that any subsequent block_start() call 375 // with an argument equal to an address that is within the range 376 // [blk_start, blk_end) would return the value blk_start, provided 377 // there have been no calls in between that reset this information 378 // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call 379 // for an appropriate range covering the said interval). 380 // These methods expect to be called with [blk_start, blk_end) 381 // representing a block of memory in the heap. 382 virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end); 383 void alloc_block(HeapWord* blk, size_t size) { 384 alloc_block(blk, blk + size); 385 } 386 387 // If true, initialize array slots with no allocated blocks to zero. 388 // Otherwise, make them point back to the front. 389 bool init_to_zero() { return _init_to_zero; } 390 // Corresponding setter 391 void set_init_to_zero(bool val) { 392 _init_to_zero = val; 393 assert(_array != NULL, "_array should be non-NULL"); 394 _array->set_init_to_zero(val); 395 } 396 397 // Debugging 398 // Return the index of the last entry in the "active" region. 399 virtual size_t last_active_index() const = 0; 400 // Verify the block offset table 401 void verify() const; 402 void check_all_cards(size_t left_card, size_t right_card) const; 403 }; 404 405 //////////////////////////////////////////////////////////////////////////// 406 // A subtype of BlockOffsetArray that takes advantage of the fact 407 // that its underlying space is a NonContiguousSpace, so that some 408 // specialized interfaces can be made available for spaces that 409 // manipulate the table. 410 //////////////////////////////////////////////////////////////////////////// 411 class BlockOffsetArrayNonContigSpace: public BlockOffsetArray { 412 friend class VMStructs; 413 private: 414 // The portion [_unallocated_block, _sp.end()) of the space that 415 // is a single block known not to contain any objects. 416 // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag. 417 HeapWord* _unallocated_block; 418 419 public: 420 BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr): 421 BlockOffsetArray(array, mr, false), 422 _unallocated_block(_bottom) { } 423 424 // accessor 425 HeapWord* unallocated_block() const { 426 assert(BlockOffsetArrayUseUnallocatedBlock, 427 "_unallocated_block is not being maintained"); 428 return _unallocated_block; 429 } 430 431 void set_unallocated_block(HeapWord* block) { 432 assert(BlockOffsetArrayUseUnallocatedBlock, 433 "_unallocated_block is not being maintained"); 434 assert(block >= _bottom && block <= _end, "out of range"); 435 _unallocated_block = block; 436 } 437 438 // These methods expect to be called with [blk_start, blk_end) 439 // representing a block of memory in the heap. 440 void alloc_block(HeapWord* blk_start, HeapWord* blk_end); 441 void alloc_block(HeapWord* blk, size_t size) { 442 alloc_block(blk, blk + size); 443 } 444 445 // The following methods are useful and optimized for a 446 // non-contiguous space. 447 448 // Given a block [blk_start, blk_start + full_blk_size), and 449 // a left_blk_size < full_blk_size, adjust the BOT to show two 450 // blocks [blk_start, blk_start + left_blk_size) and 451 // [blk_start + left_blk_size, blk_start + full_blk_size). 452 // It is assumed (and verified in the non-product VM) that the 453 // BOT was correct for the original block. 454 void split_block(HeapWord* blk_start, size_t full_blk_size, 455 size_t left_blk_size); 456 457 // Adjust BOT to show that it has a block in the range 458 // [blk_start, blk_start + size). Only the first card 459 // of BOT is touched. It is assumed (and verified in the 460 // non-product VM) that the remaining cards of the block 461 // are correct. 462 void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false); 463 void mark_block(HeapWord* blk, size_t size, bool reducing = false) { 464 mark_block(blk, blk + size, reducing); 465 } 466 467 // Adjust _unallocated_block to indicate that a particular 468 // block has been newly allocated or freed. It is assumed (and 469 // verified in the non-product VM) that the BOT is correct for 470 // the given block. 471 void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) { 472 // Verify that the BOT shows [blk, blk + blk_size) to be one block. 473 verify_single_block(blk_start, blk_end); 474 if (BlockOffsetArrayUseUnallocatedBlock) { 475 _unallocated_block = MAX2(_unallocated_block, blk_end); 476 } 477 } 478 479 void allocated(HeapWord* blk, size_t size, bool reducing = false) { 480 allocated(blk, blk + size, reducing); 481 } 482 483 void freed(HeapWord* blk_start, HeapWord* blk_end); 484 void freed(HeapWord* blk, size_t size); 485 486 HeapWord* block_start_unsafe(const void* addr) const; 487 488 // Requires "addr" to be the start of a card and returns the 489 // start of the block that contains the given address. 490 HeapWord* block_start_careful(const void* addr) const; 491 492 // Verification & debugging: ensure that the offset table reflects 493 // the fact that the block [blk_start, blk_end) or [blk, blk + size) 494 // is a single block of storage. NOTE: can't const this because of 495 // call to non-const do_block_internal() below. 496 void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) 497 PRODUCT_RETURN; 498 void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN; 499 500 // Verify that the given block is before _unallocated_block 501 void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end) 502 const PRODUCT_RETURN; 503 void verify_not_unallocated(HeapWord* blk, size_t size) 504 const PRODUCT_RETURN; 505 506 // Debugging support 507 virtual size_t last_active_index() const; 508 }; 509 510 //////////////////////////////////////////////////////////////////////////// 511 // A subtype of BlockOffsetArray that takes advantage of the fact 512 // that its underlying space is a ContiguousSpace, so that its "active" 513 // region can be more efficiently tracked (than for a non-contiguous space). 514 //////////////////////////////////////////////////////////////////////////// 515 class BlockOffsetArrayContigSpace: public BlockOffsetArray { 516 friend class VMStructs; 517 private: 518 // allocation boundary at which offset array must be updated 519 HeapWord* _next_offset_threshold; 520 size_t _next_offset_index; // index corresponding to that boundary 521 522 // Work function when allocation start crosses threshold. 523 void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end); 524 525 public: 526 BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr): 527 BlockOffsetArray(array, mr, true) { 528 _next_offset_threshold = NULL; 529 _next_offset_index = 0; 530 } 531 532 void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); } 533 534 // Initialize the threshold for an empty heap. 535 HeapWord* initialize_threshold(); 536 // Zero out the entry for _bottom (offset will be zero) 537 void zero_bottom_entry(); 538 539 // Return the next threshold, the point at which the table should be 540 // updated. 541 HeapWord* threshold() const { return _next_offset_threshold; } 542 543 // In general, these methods expect to be called with 544 // [blk_start, blk_end) representing a block of memory in the heap. 545 // In this implementation, however, we are OK even if blk_start and/or 546 // blk_end are NULL because NULL is represented as 0, and thus 547 // never exceeds the "_next_offset_threshold". 548 void alloc_block(HeapWord* blk_start, HeapWord* blk_end) { 549 if (blk_end > _next_offset_threshold) { 550 alloc_block_work(blk_start, blk_end); 551 } 552 } 553 void alloc_block(HeapWord* blk, size_t size) { 554 alloc_block(blk, blk + size); 555 } 556 557 HeapWord* block_start_unsafe(const void* addr) const; 558 559 void serialize(SerializeOopClosure* soc); 560 561 // Debugging support 562 virtual size_t last_active_index() const; 563 };