/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP
#define SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP

#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start".  For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important.  Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// BlockOffsetTable (abstract)
//   - BlockOffsetArray (abstract)
//     - BlockOffsetArrayNonContigSpace
//     - BlockOffsetArrayContigSpace
//

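// To make the scheme concrete: the table keeps, per fixed-size "card"
// of the covered region, a byte describing how far back the enclosing
// block starts.  A hedged sketch of the lookup these classes support
// (illustrative only; the real logic lives in blockOffsetTable.cpp and
// handles multi-card back-skips):
//
//   HeapWord* block_start_sketch(const void* addr) {
//     size_t index  = index_for(addr);       // card containing addr
//     u_char offset = offset_array(index);   // words back to block start
//     return address_for_index(index) - offset;
//   }
//
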
class ContiguousSpace;

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};
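
// A minimal sketch of how block_start() can meet its contract in terms
// of block_start_unsafe() (the actual definition is out of line; this
// is illustrative only): answer NULL for addresses outside the covered
// region, otherwise delegate.
//
//   HeapWord* BlockOffsetTable::block_start(const void* addr) const {
//     if (addr >= _bottom && addr < _end) {
//       return block_start_unsafe(addr);
//     }
//     return NULL;  // covered by no block
//   }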

//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN").  An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space.  However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place.  (Consider,
// for example, the garbage-first generation.)

// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

 private:
  enum SomePrivateConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };
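
  // Worked example (arithmetic from the constants above): with LogN = 9,
  // each entry covers a card of N_bytes = 1 << 9 = 512 bytes.  On a
  // 64-bit VM, LogHeapWordSize = 3, so LogN_words = 6 and a card holds
  // N_words = 64 heap words; on a 32-bit VM (LogHeapWordSize = 2) a
  // card would hold 128 words.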

  bool _init_to_zero;

  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

 protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }
  // An assertion-checking helper method for the set_offset_array() methods below.
  void check_reducing_assertion(bool reducing);

  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
           "Not reducing");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;

    // Below, we may use an explicit loop instead of memset()
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[index_for(left)], offset, num_cards);
    } else {
      size_t i = index_for(left);
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }

  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;

    // Below, we may use an explicit loop instead of memset()
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[left], offset, num_cards);
    } else {
      size_t i = left;
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.
  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words) + 1;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
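
  // Worked example (illustrative arithmetic): covering a 64 MB region
  // on a 64-bit VM means mem_region_words = 8M, so with N_words = 64 we
  // need 8M / 64 + 1 = 131073 slots before alignment; the "+ 1" is the
  // reserved slot described above for blocks ending exactly on a card
  // boundary.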

public:
  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size".  In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".)  The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table.  The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Whether entries should be initialized to zero.  Used currently only for
  // error checking.
  void set_init_to_zero(bool val) { _init_to_zero = val; }
  bool init_to_zero() { return _init_to_zero; }

  // Updates all the BlockOffsetArray's sharing this shared array to
  // reflect the current "top"'s of their spaces.
  void update_offset_arrays();   // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;
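
  // The two methods above are inverses up to card granularity.  A
  // hedged sketch of the intended mapping (the real definitions are out
  // of line and carry bounds asserts):
  //
  //   index_for(p)         ~ pointer_delta((char*)p, (char*)_reserved.start(),
  //                                        sizeof(char)) >> LogN;
  //   address_for_index(i) ~ _reserved.start() + (i << LogN_words);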
};

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
  friend class VMStructs;
  friend class G1BlockOffsetArray; // temp. until we restructure and cleanup
 protected:
  // The following enums are used by do_block_internal() below
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  enum SomePrivateConstants {
    N_words = BlockOffsetSharedArray::N_words,
    LogN    = BlockOffsetSharedArray::LogN,
    // entries "e" of at least N_words mean "go back by Base^(e-N_words)."
    // All entries are less than "N_words + N_powers".
    LogBase = 4,
    Base = (1 << LogBase),
    N_powers = 14
  };

  static size_t power_to_cards_back(uint i) {
    return (size_t)1 << (LogBase * i);
  }
  static size_t power_to_words_back(uint i) {
    return power_to_cards_back(i) * N_words;
  }
  static size_t entry_to_cards_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_cards_back(entry - N_words);
  }
  static size_t entry_to_words_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_words_back(entry - N_words);
  }
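
  // Worked example of the encoding (assuming the 64-bit values
  // N_words = 64, LogBase = 4, Base = 16): an entry e < 64 means "the
  // block starts e words before this card's boundary"; an entry
  // 64 + i means "skip back 16^i cards and consult that entry".  So
  // entry_to_cards_back(66) = power_to_cards_back(2) = 1 << 8 = 256
  // cards, and a lookup iterates until it reaches an entry below
  // N_words, which gives the final word offset.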

  // The shared array, which is shared with other BlockOffsetArray's
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // An assertion-checking helper method for the set_remainder*() methods below.
  void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the
  // interval [start, end) is right-open.  The last parameter, reducing,
  // indicates whether the updates to individual entries always reduce
  // the entry from a higher to a lower value.  (For example, this would
  // hold true during a temporal regime during which only block splits
  // were updating the BOT.)
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);

 public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter.  If "init_to_zero" is true, the
  // elements of the array are initialized to zero.  Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero_);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // verify that the old and new boundaries are also card boundaries
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // set all the newly added cards
      _array->set_offset_array(_end, new_end, N_words);
    }
    _end = new_end;  // update _end
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size).  All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering the said interval).
  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
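
  // A hedged usage sketch ("bot" here names a space's BlockOffsetArray;
  // illustrative only): after carving a new block out of free memory,
  // the space records it so that subsequent lookups resolve to its start.
  //
  //   HeapWord* blk = ...;              // freshly allocated block
  //   bot->alloc_block(blk, blk_size);  // record [blk, blk + blk_size)
  //   // now block_start(blk + k) == blk for 0 <= k < blk_size, until
  //   // a later call (e.g. single_block()) resets this information.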

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }
  // Corresponding setter
  void set_init_to_zero(bool val) {
    _init_to_zero = val;
    assert(_array != NULL, "_array should be non-NULL");
    _array->set_init_to_zero(val);
  }

  // Debugging
  // Return the index of the last entry in the "active" region.
  virtual size_t last_active_index() const = 0;
  // Verify the block offset table
  void verify() const;
  void check_all_cards(size_t left_card, size_t right_card) const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a NonContiguousSpace, so that some
// specialized interfaces can be made available for spaces that
// manipulate the table.
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

 public:
  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, false),
    _unallocated_block(_bottom) { }

  // Accessor
  HeapWord* unallocated_block() const {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    return _unallocated_block;
  }

  void set_unallocated_block(HeapWord* block) {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    assert(block >= _bottom && block <= _end, "out of range");
    _unallocated_block = block;
  }

  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);
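
  // Worked example (illustrative sizes): if the BOT records
  // [blk_start, blk_start + 100) as one block, then
  // split_block(blk_start, 100, 40) updates it to show the two blocks
  // [blk_start, blk_start + 40) and [blk_start + 40, blk_start + 100);
  // only entries for cards at or beyond the split point need to change,
  // since earlier cards still point back to blk_start.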

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size).  Only the first card
  // of BOT is touched.  It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
  void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
    mark_block(blk, blk + size, reducing);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed.  It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
    // Verify that the BOT shows [blk_start, blk_end) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  void allocated(HeapWord* blk, size_t size, bool reducing = false) {
    allocated(blk, blk + size, reducing);
  }

  void freed(HeapWord* blk_start, HeapWord* blk_end);
  void freed(HeapWord* blk, size_t size);

  HeapWord* block_start_unsafe(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // Verification & debugging: ensure that the offset table reflects
  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
  // is a single block of storage.  NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
    PRODUCT_RETURN;
  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;

  // Verify that the given block is before _unallocated_block
  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
    const PRODUCT_RETURN;
  void verify_not_unallocated(HeapWord* blk, size_t size)
    const PRODUCT_RETURN;

  // Debugging support
  virtual size_t last_active_index() const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;      // index corresponding to that boundary

  // Work function when allocation start crosses threshold.
  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);

 public:
  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, true) {
    _next_offset_threshold = NULL;
    _next_offset_index = 0;
  }

  void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }

  // Initialize the threshold for an empty heap.
  HeapWord* initialize_threshold();
  // Zero out the entry for _bottom (offset will be zero)
  void      zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // In general, these methods expect to be called with
  // [blk_start, blk_end) representing a block of memory in the heap.
  // In this implementation, however, we are OK even if blk_start and/or
  // blk_end are NULL because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
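
  // A hedged sketch of the intended fast path ("sp" and "bot" are
  // illustrative names, not part of this interface): a contiguous space
  // bumps its top and reports each allocation; most calls return
  // without touching the table because blk_end has not yet crossed the
  // threshold.
  //
  //   HeapWord* res = sp->top();
  //   sp->set_top(res + size);
  //   bot->alloc_block(res, res + size);  // cheap unless threshold crossed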

  HeapWord* block_start_unsafe(const void* addr) const;

  // Debugging support
  virtual size_t last_active_index() const;
};

#endif // SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP