src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hs-gc9 Sdiff src/share/vm/gc_implementation/g1

src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp

Print this page




  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
  27 #include "gc_implementation/g1/heapRegion.hpp"
  28 #include "memory/space.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/java.hpp"
  31 #include "services/memTracker.hpp"
  32 
  33 
  34 
// Mapping-changed callback: invoked after (part of) the BOT backing storage
// has been committed. Deliberately a no-op here.
void G1BlockOffsetSharedArrayMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
  // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
  // retrieve it here since this would cause firing of several asserts. The code
  // executed after commit of a region already needs to do some re-initialization of
  // the HeapRegion, so we combine that.
}
  41 
  42 //////////////////////////////////////////////////////////////////////
  43 // G1BlockOffsetSharedArray
  44 //////////////////////////////////////////////////////////////////////
  45 
  46 G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :
  47   _reserved(), _end(NULL), _listener(), _offset_array(NULL) {
  48 
  49   _reserved = heap;
  50   _end = NULL;
  51 
  52   MemRegion bot_reserved = storage->reserved();
  53 
  54   _offset_array = (u_char*)bot_reserved.start();
  55   _end = _reserved.end();
  56 
  57   storage->set_mapping_changed_listener(&_listener);
  58 
  59   if (TraceBlockOffsetTable) {
  60     gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
  61     gclog_or_tty->print_cr("  "
  62                   "  rs.base(): " PTR_FORMAT
  63                   "  rs.size(): " SIZE_FORMAT
  64                   "  rs end(): " PTR_FORMAT,
  65                   p2i(bot_reserved.start()), bot_reserved.byte_size(), p2i(bot_reserved.end()));
  66   }
  67 }
  68 
  69 bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  70   assert(p >= _reserved.start(), "just checking");
  71   size_t delta = pointer_delta(p, _reserved.start());
  72   return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
  73 }
  74 
  75 void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
  76   set_offset_array(index_for(left), index_for(right -1), offset);
  77 }
  78 
  79 //////////////////////////////////////////////////////////////////////
  80 // G1BlockOffsetArray
  81 //////////////////////////////////////////////////////////////////////
  82 
// Construct a BOT view over mr using the shared array. If init_to_zero is
// false, all cards are eagerly initialized to point back to mr.start();
// otherwise card setup is deferred (entries start zeroed).
G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
                                       MemRegion mr, bool init_to_zero) :
  G1BlockOffsetTable(mr.start(), mr.end()),
  _unallocated_block(_bottom),
  _array(array), _gsp(NULL),
  _init_to_zero(init_to_zero) {
  assert(_bottom <= _end, "arguments out of order");
  if (!_init_to_zero) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}
  96 
// Associate this BOT with the contiguous space it covers.
void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) {
  _gsp = sp;
}
 100 
 101 // The arguments follow the normal convention of denoting
 102 // a right-open interval: [start, end)
 103 void
 104 G1BlockOffsetArray:: set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {
 105 
 106   if (start >= end) {
 107     // The start address is equal to the end address (or to
 108     // the right of the end address) so there are not cards
 109     // that need to be updated..
 110     return;
 111   }
 112 
 113   // Write the backskip value for each region.
 114   //


 164   size_t start_card_for_region = start_card;
 165   u_char offset = max_jubyte;
 166   for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
 167     // -1 so that the the card with the actual offset is counted.  Another -1
 168     // so that the reach ends in this region and not at the start
 169     // of the next.
 170     size_t reach = start_card - 1 + (BlockOffsetArray::power_to_cards_back(i+1) - 1);
 171     offset = N_words + i;
 172     if (reach >= end_card) {
 173       _array->set_offset_array(start_card_for_region, end_card, offset);
 174       start_card_for_region = reach + 1;
 175       break;
 176     }
 177     _array->set_offset_array(start_card_for_region, reach, offset);
 178     start_card_for_region = reach + 1;
 179   }
 180   assert(start_card_for_region > end_card, "Sanity check");
 181   DEBUG_ONLY(check_all_cards(start_card, end_card);)
 182 }
 183 
// The block [blk_start, blk_end) has been allocated;
// adjust the block offset table to represent this information;
// right-open interval: [blk_start, blk_end)
void
G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  // First update the table entries, then record the allocation
  // (which is what adjusts _unallocated_block).
  mark_block(blk_start, blk_end);
  allocated(blk_start, blk_end);
}
 192 
// Adjust BOT to show that a previously whole block has been split
// into two. Only the right-hand part needs re-marking; the entries for
// the left part [blk, blk + left_blk_size) remain valid.
void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
                                     size_t left_blk_size) {
  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
  verify_single_block(blk, blk_size);
  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
  // is one single block.
  mark_block(blk + left_blk_size, blk + blk_size);
}
 203 
 204 
// Action_mark - update the BOT for the block [blk_start, blk_end).
//               Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_check - BOT verification.
void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                           HeapWord* blk_end,
                                           Action action) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary; a block entirely inside one
    // card needs no table update at all.
    // Calculate index of card on which blk begins
    size_t    start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t    end_index   = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary    = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          // Lazy BOT: only the first crossed card needs the offset.
          _array->set_offset_array(start_index, boundary, blk_start);
          break;
        } // Else fall through to the next case
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
          set_remainder_to_point_to_start(rem_st, rem_end);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}
 270 
// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

  if (end_card < start_card) {
    return;
  }
  // start_card is the card after the one holding the block start, hence
  // its entry must be exactly N_words (one full card back).
  guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
  for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
    u_char entry = _array->offset_array(c);
    if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
      // Far enough from the block start that the entry must encode a
      // logarithmic back-skip, not a plain word offset.
      guarantee(entry > N_words,
                err_msg("Should be in logarithmic region - "
                        "entry: %u, "
                        "_array->offset_array(c): %u, "
                        "N_words: %u",
                        (uint)entry, (uint)_array->offset_array(c), (uint)N_words));
    }
    size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
    size_t landing_card = c - backskip;
    guarantee(landing_card >= (start_card - 1), "Inv");
    if (landing_card >= start_card) {
      // Landed within the checked range: entries must be monotone so the
      // chain of back-skips terminates.
      guarantee(_array->offset_array(landing_card) <= entry,
                err_msg("Monotonicity - landing_card offset: %u, "
                        "entry: %u",
                        (uint)_array->offset_array(landing_card), (uint)entry));
    } else {
      guarantee(landing_card == start_card - 1, "Tautology");
      // Note that N_words is the maximum offset value
      guarantee(_array->offset_array(landing_card) <= N_words,
                err_msg("landing card offset: %u, "
                        "N_words: %u",
                        (uint)_array->offset_array(landing_card), (uint)N_words));
    }
  }
}
 308 
// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_single);
}
 317 
// Mark the BOT such that if [blk_start, blk_end) straddles a card
// boundary, the card following the first such boundary is marked
// with the appropriate offset.
// NOTE: this method does _not_ adjust _unallocated_block or
// any cards subsequent to the first one.
void
G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_mark);
}
 327 
// Find the start of the block containing addr. "Unsafe" because the
// result may be stale under concurrent allocation.
HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    // addr lies beyond everything allocated so far.
    assert(ub < _end, "tautology (see above)");
    return ub;
  }
  // Otherwise, find the block start using the table.
  HeapWord* q = block_at_or_preceding(addr, false, 0);
  return forward_to_block_containing_addr(q, addr);
}
 342 
 343 // This duplicates a little code from the above: unavoidable.
 344 HeapWord*
 345 G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
 346   assert(_bottom <= addr && addr < _end,
 347          "addr must be covered by this Array");


 380                             (n_index == next_index ? 0 : N_words);
 381   assert(next_boundary <= _array->_end,
 382          err_msg("next_boundary is beyond the end of the covered region "
 383                  " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
 384                  p2i(next_boundary), p2i(_array->_end)));
 385   if (addr >= gsp()->top()) return gsp()->top();
 386   while (next_boundary < addr) {
 387     while (n <= next_boundary) {
 388       q = n;
 389       oop obj = oop(q);
 390       if (obj->klass_or_null() == NULL) return q;
 391       n += block_size(q);
 392     }
 393     assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
 394     // [q, n) is the block that crosses the boundary.
 395     alloc_block_work2(&next_boundary, &next_index, q, n);
 396   }
 397   return forward_to_block_containing_addr_const(q, n, addr);
 398 }
 399 
// Like block_start_unsafe, but walks backwards using only the table,
// never parsing objects on the cards themselves. addr must be a card
// boundary. Returns a block start at or before addr.
HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table, but taking
  // care (cf block_start_unsafe() above) not to parse any objects/blocks
  // on the cards themselves.
  size_t index = _array->index_for(addr);
  assert(_array->address_for_index(index) == addr,
         "arg should be start of card");

  HeapWord* q = (HeapWord*)addr;
  uint offset;
  do {
    // Step back by the recorded offset; keep going while the entry says
    // "a full card back" (N_words), i.e. the block starts further left.
    // NOTE(review): entries > N_words (logarithmic region) also terminate
    // this loop -- presumably such entries cannot occur at the cards
    // visited here; confirm against the BOT invariants.
    offset = _array->offset_array(index--);
    q -= offset;
  } while (offset == N_words);
  assert(q <= addr, "block start should be to left of arg");
  return q;
}
 429 
 430 // Note that the committed size of the covered space may have changed,
 431 // so the table size might also wish to change.
 432 void G1BlockOffsetArray::resize(size_t new_word_size) {
 433   HeapWord* new_end = _bottom + new_word_size;
 434   if (_end < new_end && !init_to_zero()) {
 435     // verify that the old and new boundaries are also card boundaries
 436     assert(_array->is_card_boundary(_end),
 437            "_end not a card boundary");
 438     assert(_array->is_card_boundary(new_end),
 439            "new _end would not be a card boundary");
 440     // set all the newly added cards
 441     _array->set_offset_array(_end, new_end, N_words);
 442   }
 443   _end = new_end;  // update _end
 444 }
 445 
 446 void G1BlockOffsetArray::set_region(MemRegion mr) {
 447   _bottom = mr.start();
 448   _end = mr.end();
 449 }
 450 
 451 //
 452 //              threshold_
 453 //              |   _index_
 454 //              v   v
 455 //      +-------+-------+-------+-------+-------+
 456 //      | i-1   |   i   | i+1   | i+2   | i+3   |
 457 //      +-------+-------+-------+-------+-------+
 458 //       ( ^    ]
 459 //         block-start
 460 //
 461 void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_,
 462                                            HeapWord* blk_start, HeapWord* blk_end) {
 463   // For efficiency, do copy-in/copy-out.
 464   HeapWord* threshold = *threshold_;
 465   size_t    index = *index_;
 466 
 467   assert(blk_start != NULL && blk_end > blk_start,
 468          "phantom block");
 469   assert(blk_end > threshold, "should be past threshold");
 470   assert(blk_start <= threshold, "blk_start should be at or before threshold");


 589 G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) {
 590   assert(_bottom <= addr && addr < _end,
 591          "addr must be covered by this Array");
 592   HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
 593   return forward_to_block_containing_addr(q, addr);
 594 }
 595 
// Const variant for the contiguous space: locate the block containing
// addr via the table, then walk forward block by block.
HeapWord*
G1BlockOffsetArrayContigSpace::
block_start_unsafe_const(const void* addr) const {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
  HeapWord* n = q + block_size(q);
  return forward_to_block_containing_addr_const(q, n, addr);
}
 605 
 606 G1BlockOffsetArrayContigSpace::
 607 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
 608                               MemRegion mr) :
 609   G1BlockOffsetArray(array, mr, true)
 610 {
 611   _next_offset_threshold = NULL;
 612   _next_offset_index = 0;
 613 }
 614 
// Set the allocation threshold to the start of the card after the one
// containing _bottom, and return it.
// NOTE(review): the "_raw" index/address helpers presumably skip
// covered-region checks valid only after full initialization -- confirm
// against g1BlockOffsetTable.inline.hpp.
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  _next_offset_index = _array->index_for_raw(_bottom);
  _next_offset_index++;
  _next_offset_threshold =
    _array->address_for_index_raw(_next_offset_index);
  return _next_offset_threshold;
}
 624 
// Zero the table entry for the card containing _bottom (which must be
// card-aligned), using the raw accessors.
void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  size_t bottom_index = _array->index_for_raw(_bottom);
  assert(_array->address_for_index_raw(bottom_index) == _bottom,
         "Precondition of call");
  _array->set_offset_array_raw(bottom_index, 0);
}
 633 
// Set the allocation threshold to the start of the card after the one
// containing _bottom, and return it (checked accessor variant).
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  _next_offset_index = _array->index_for(_bottom);
  _next_offset_index++;
  _next_offset_threshold =
    _array->address_for_index(_next_offset_index);
  return _next_offset_threshold;
}
 643 
// Zero the table entry for the card containing _bottom (which must be
// card-aligned).
void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  size_t bottom_index = _array->index_for(_bottom);
  assert(_array->address_for_index(bottom_index) == _bottom,
         "Precondition of call");
  _array->set_offset_array(bottom_index, 0);
}
 652 
 653 void
 654 G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
 655   assert(new_top <= _end, "_end should have already been updated");
 656 
 657   // The first BOT entry should have offset 0.
 658   reset_bot();
 659   alloc_block(_bottom, new_top);
 660  }
 661 
 662 #ifndef PRODUCT
 663 void
 664 G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
 665   G1BlockOffsetArray::print_on(out);
 666   out->print_cr("  next offset threshold: "PTR_FORMAT, p2i(_next_offset_threshold));
 667   out->print_cr("  next offset index:     "SIZE_FORMAT, _next_offset_index);
 668 }
 669 #endif // !PRODUCT


  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
  27 #include "gc_implementation/g1/heapRegion.hpp"
  28 #include "memory/space.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/java.hpp"
  31 #include "services/memTracker.hpp"
  32 
  33 
  34 







  35 //////////////////////////////////////////////////////////////////////
  36 // G1BlockOffsetSharedArray
  37 //////////////////////////////////////////////////////////////////////
  38 
  39 G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :
  40   _reserved(), _end(NULL), _listener(), _offset_array(NULL) {
  41 
  42   _reserved = heap;
  43   _end = NULL;
  44 
  45   MemRegion bot_reserved = storage->reserved();
  46 
  47   _offset_array = (u_char*)bot_reserved.start();
  48   _end = _reserved.end();
  49 
  50   storage->set_mapping_changed_listener(&_listener);
  51 
  52   if (TraceBlockOffsetTable) {
  53     gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
  54     gclog_or_tty->print_cr("  "
  55                   "  rs.base(): " PTR_FORMAT
  56                   "  rs.size(): " SIZE_FORMAT
  57                   "  rs end(): " PTR_FORMAT,
  58                   p2i(bot_reserved.start()), bot_reserved.byte_size(), p2i(bot_reserved.end()));
  59   }
  60 }
  61 
  62 bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  63   assert(p >= _reserved.start(), "just checking");
  64   size_t delta = pointer_delta(p, _reserved.start());
  65   return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
  66 }
  67 




  68 //////////////////////////////////////////////////////////////////////
  69 // G1BlockOffsetArray
  70 //////////////////////////////////////////////////////////////////////
  71 
// Construct a BOT view over mr using the shared array. Card entries are
// not initialized here; they are set up lazily as blocks are allocated.
G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
                                       MemRegion mr) :
  G1BlockOffsetTable(mr.start(), mr.end()),
  _unallocated_block(_bottom),
  _array(array), _gsp(NULL) {
  assert(_bottom <= _end, "arguments out of order");
}
  79 
// Associate this BOT with the contiguous space it covers.
void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) {
  _gsp = sp;
}
  83 
  84 // The arguments follow the normal convention of denoting
  85 // a right-open interval: [start, end)
  86 void
  87 G1BlockOffsetArray:: set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {
  88 
  89   if (start >= end) {
  90     // The start address is equal to the end address (or to
  91     // the right of the end address) so there are not cards
  92     // that need to be updated..
  93     return;
  94   }
  95 
  96   // Write the backskip value for each region.
  97   //


 147   size_t start_card_for_region = start_card;
 148   u_char offset = max_jubyte;
 149   for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
 150     // -1 so that the the card with the actual offset is counted.  Another -1
 151     // so that the reach ends in this region and not at the start
 152     // of the next.
 153     size_t reach = start_card - 1 + (BlockOffsetArray::power_to_cards_back(i+1) - 1);
 154     offset = N_words + i;
 155     if (reach >= end_card) {
 156       _array->set_offset_array(start_card_for_region, end_card, offset);
 157       start_card_for_region = reach + 1;
 158       break;
 159     }
 160     _array->set_offset_array(start_card_for_region, reach, offset);
 161     start_card_for_region = reach + 1;
 162   }
 163   assert(start_card_for_region > end_card, "Sanity check");
 164   DEBUG_ONLY(check_all_cards(start_card, end_card);)
 165 }
 166 























































































// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

  if (end_card < start_card) {
    return;
  }
  // start_card is the card after the one holding the block start, hence
  // its entry must be exactly N_words (one full card back).
  guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
  for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
    u_char entry = _array->offset_array(c);
    if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
      // Far enough from the block start that the entry must encode a
      // logarithmic back-skip, not a plain word offset.
      guarantee(entry > N_words,
                err_msg("Should be in logarithmic region - "
                        "entry: %u, "
                        "_array->offset_array(c): %u, "
                        "N_words: %u",
                        (uint)entry, (uint)_array->offset_array(c), (uint)N_words));
    }
    size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
    size_t landing_card = c - backskip;
    guarantee(landing_card >= (start_card - 1), "Inv");
    if (landing_card >= start_card) {
      // Landed within the checked range: entries must be monotone so the
      // chain of back-skips terminates.
      guarantee(_array->offset_array(landing_card) <= entry,
                err_msg("Monotonicity - landing_card offset: %u, "
                        "entry: %u",
                        (uint)_array->offset_array(landing_card), (uint)entry));
    } else {
      guarantee(landing_card == start_card - 1, "Tautology");
      // Note that N_words is the maximum offset value
      guarantee(_array->offset_array(landing_card) <= N_words,
                err_msg("landing card offset: %u, "
                        "N_words: %u",
                        (uint)_array->offset_array(landing_card), (uint)N_words));
    }
  }
}
 204 



















// Find the start of the block containing addr. "Unsafe" because the
// result may be stale under concurrent allocation.
HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    // addr lies beyond everything allocated so far.
    assert(ub < _end, "tautology (see above)");
    return ub;
  }
  // Otherwise, find the block start using the table.
  HeapWord* q = block_at_or_preceding(addr, false, 0);
  return forward_to_block_containing_addr(q, addr);
}
 219 
 220 // This duplicates a little code from the above: unavoidable.
 221 HeapWord*
 222 G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
 223   assert(_bottom <= addr && addr < _end,
 224          "addr must be covered by this Array");


 257                             (n_index == next_index ? 0 : N_words);
 258   assert(next_boundary <= _array->_end,
 259          err_msg("next_boundary is beyond the end of the covered region "
 260                  " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
 261                  p2i(next_boundary), p2i(_array->_end)));
 262   if (addr >= gsp()->top()) return gsp()->top();
 263   while (next_boundary < addr) {
 264     while (n <= next_boundary) {
 265       q = n;
 266       oop obj = oop(q);
 267       if (obj->klass_or_null() == NULL) return q;
 268       n += block_size(q);
 269     }
 270     assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
 271     // [q, n) is the block that crosses the boundary.
 272     alloc_block_work2(&next_boundary, &next_index, q, n);
 273   }
 274   return forward_to_block_containing_addr_const(q, n, addr);
 275 }
 276 






























 277 // Note that the committed size of the covered space may have changed,
 278 // so the table size might also wish to change.
 279 void G1BlockOffsetArray::resize(size_t new_word_size) {
 280   HeapWord* new_end = _bottom + new_word_size;









 281   _end = new_end;  // update _end
 282 }
 283 





 284 //
 285 //              threshold_
 286 //              |   _index_
 287 //              v   v
 288 //      +-------+-------+-------+-------+-------+
 289 //      | i-1   |   i   | i+1   | i+2   | i+3   |
 290 //      +-------+-------+-------+-------+-------+
 291 //       ( ^    ]
 292 //         block-start
 293 //
 294 void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_,
 295                                            HeapWord* blk_start, HeapWord* blk_end) {
 296   // For efficiency, do copy-in/copy-out.
 297   HeapWord* threshold = *threshold_;
 298   size_t    index = *index_;
 299 
 300   assert(blk_start != NULL && blk_end > blk_start,
 301          "phantom block");
 302   assert(blk_end > threshold, "should be past threshold");
 303   assert(blk_start <= threshold, "blk_start should be at or before threshold");


 422 G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) {
 423   assert(_bottom <= addr && addr < _end,
 424          "addr must be covered by this Array");
 425   HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
 426   return forward_to_block_containing_addr(q, addr);
 427 }
 428 
// Const variant for the contiguous space: locate the block containing
// addr via the table, then walk forward block by block.
HeapWord*
G1BlockOffsetArrayContigSpace::
block_start_unsafe_const(const void* addr) const {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
  HeapWord* n = q + block_size(q);
  return forward_to_block_containing_addr_const(q, n, addr);
}
 438 
 439 G1BlockOffsetArrayContigSpace::
 440 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
 441                               MemRegion mr) :
 442   G1BlockOffsetArray(array, mr)
 443 {
 444   _next_offset_threshold = NULL;
 445   _next_offset_index = 0;
 446 }
 447 
// Set the allocation threshold to the start of the card after the one
// containing _bottom, and return it.
// NOTE(review): the "_raw" index/address helpers presumably skip
// covered-region checks valid only after full initialization -- confirm
// against g1BlockOffsetTable.inline.hpp.
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  _next_offset_index = _array->index_for_raw(_bottom);
  _next_offset_index++;
  _next_offset_threshold =
    _array->address_for_index_raw(_next_offset_index);
  return _next_offset_threshold;
}
 457 
// Zero the table entry for the card containing _bottom (which must be
// card-aligned), using the raw accessors.
void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  size_t bottom_index = _array->index_for_raw(_bottom);
  assert(_array->address_for_index_raw(bottom_index) == _bottom,
         "Precondition of call");
  _array->set_offset_array_raw(bottom_index, 0);
}
 466 
// Set the allocation threshold to the start of the card after the one
// containing _bottom, and return it (checked accessor variant).
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  _next_offset_index = _array->index_for(_bottom);
  _next_offset_index++;
  _next_offset_threshold =
    _array->address_for_index(_next_offset_index);
  return _next_offset_threshold;
}
 476 
 477 void
 478 G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
 479   assert(new_top <= _end, "_end should have already been updated");
 480 
 481   // The first BOT entry should have offset 0.
 482   reset_bot();
 483   alloc_block(_bottom, new_top);
 484  }
 485 
 486 #ifndef PRODUCT
 487 void
 488 G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
 489   G1BlockOffsetArray::print_on(out);
 490   out->print_cr("  next offset threshold: "PTR_FORMAT, p2i(_next_offset_threshold));
 491   out->print_cr("  next offset index:     "SIZE_FORMAT, _next_offset_index);
 492 }
 493 #endif // !PRODUCT
src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File