src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp

rev 6587 : 8047820: G1 Block offset table does not need to support generic Space classes
Reviewed-by:


   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
  27 #include "memory/space.hpp"
  28 #include "oops/oop.inline.hpp"
  29 #include "runtime/java.hpp"
  30 #include "services/memTracker.hpp"
  31 
  32 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  33 
  34 //////////////////////////////////////////////////////////////////////
  35 // G1BlockOffsetSharedArray
  36 //////////////////////////////////////////////////////////////////////
  37 
  38 G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
  39                                                    size_t init_word_size) :
  40   _reserved(reserved), _end(NULL)
  41 {
  42   size_t size = compute_size(reserved.word_size());
  43   ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
  44   if (!rs.is_reserved()) {
  45     vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  46   }


  90     _vs.shrink_by(delta);
  91     assert(_vs.high() == high - delta, "invalid expansion");
  92   }
  93 }
  94 
  95 bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  96   assert(p >= _reserved.start(), "just checking");
  97   size_t delta = pointer_delta(p, _reserved.start());
  98   return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
  99 }
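
As an aside for reviewers: the boundary test above is plain power-of-two mask
arithmetic. A minimal standalone sketch, assuming G1's usual geometry of
512-byte cards and 8-byte heap words (so N_words == 64, LogN_words == 6):

    #include <cassert>
    #include <cstddef>

    const size_t kLogNWords = 6;                 // assumed: 64 words per card

    // Mirrors is_card_boundary(): a word delta from the reserved base sits on
    // a card boundary iff its low LogN_words bits are all zero.
    bool is_card_boundary(size_t word_delta) {
      return (word_delta & ((size_t(1) << kLogNWords) - 1)) == 0;
    }

    int main() {
      assert(is_card_boundary(0));               // the reserved start itself
      assert(is_card_boundary(128));             // two full cards in
      assert(!is_card_boundary(65));             // one word past a boundary
    }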
 100 
 101 
 102 //////////////////////////////////////////////////////////////////////
 103 // G1BlockOffsetArray
 104 //////////////////////////////////////////////////////////////////////
 105 
 106 G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
 107                                        MemRegion mr, bool init_to_zero) :
 108   G1BlockOffsetTable(mr.start(), mr.end()),
 109   _unallocated_block(_bottom),
 110   _array(array), _csp(NULL),
 111   _init_to_zero(init_to_zero) {
 112   assert(_bottom <= _end, "arguments out of order");
 113   if (!_init_to_zero) {
 114     // initialize cards to point back to mr.start()
 115     set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
 116     _array->set_offset_array(0, 0);  // set first card to 0
 117   }
 118 }
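
For intuition about the non-init_to_zero branch: after construction, every
card in the region refers back to mr.start(), so a lookup anywhere in a fresh
region resolves to the region start. A toy analogue with flat offsets (names
assumed; the real set_remainder_to_point_to_start() below compresses long
runs with logarithmic backskip entries, per its diagram):

    #include <cstddef>
    #include <vector>

    const size_t kCardWords = 64;                // assumed card geometry

    // Make cards [first_card, end_card) all refer back to the block that
    // starts at word block_start (here: the covered region's start).
    void point_back_to_start(std::vector<size_t>& table, size_t block_start,
                             size_t first_card, size_t end_card) {
      for (size_t c = first_card; c < end_card; ++c) {
        table[c] = c * kCardWords - block_start; // words back to block start
      }
    }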
 119 
 120 void G1BlockOffsetArray::set_space(Space* sp) {
 121   _sp = sp;
 122   _csp = sp->toContiguousSpace();
 123 }
 124 
 125 // The arguments follow the normal convention of denoting
 126 // a right-open interval: [start, end)
 127 void
 128 G1BlockOffsetArray::set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {
 129 
 130   if (start >= end) {
 131     // The start address is equal to the end address (or to
 132     // the right of the end address) so there are no cards
 133     // that need to be updated.
 134     return;
 135   }
 136 
 137   // Write the backskip value for each region.
 138   //
 139   //    offset
 140   //    card             2nd                       3rd
 141   //     | +- 1st        |                         |
 142   //     v v             v                         v


 361   }
 362   // Otherwise, find the block start using the table.
 363   HeapWord* q = block_at_or_preceding(addr, false, 0);
 364   return forward_to_block_containing_addr(q, addr);
 365 }
 366 
 367 // This duplicates a little code from the above: unavoidable.
 368 HeapWord*
 369 G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
 370   assert(_bottom <= addr && addr < _end,
 371          "addr must be covered by this Array");
 372   // Must read this exactly once because it can be modified by parallel
 373   // allocation.
 374   HeapWord* ub = _unallocated_block;
 375   if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
 376     assert(ub < _end, "tautology (see above)");
 377     return ub;
 378   }
 379   // Otherwise, find the block start using the table.
 380   HeapWord* q = block_at_or_preceding(addr, false, 0);
 381   HeapWord* n = q + _sp->block_size(q);
 382   return forward_to_block_containing_addr_const(q, n, addr);
 383 }
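
The "read exactly once" comment above is load-bearing: a second read of
_unallocated_block could observe a concurrently advanced value and break the
invariant that the returned block start lies at or below addr. A toy
illustration of the pattern (std::atomic and the names below are for
exposition only; HotSpot uses its own memory-access conventions here):

    #include <atomic>

    std::atomic<char*> unallocated_block{nullptr};  // another thread may advance this

    char* block_start_above_allocated(const char* addr) {
      char* ub = unallocated_block.load(std::memory_order_relaxed);  // one snapshot
      if (addr >= ub) {
        // Reusing the snapshot keeps the test and the result consistent; a
        // fresh read here could already have moved past addr.
        return ub;
      }
      return nullptr;  // the real code falls through to the table lookup
    }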
 384 
 385 
 386 HeapWord*
 387 G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
 388                                                           HeapWord* n,
 389                                                           const void* addr) {
 390   // We're not in the normal case.  We need to handle an important subcase
 391   // here: LAB allocation.  An allocation previously recorded in the
 392   // offset table was actually a lab allocation, and was divided into
 393   // several objects subsequently.  Fix this situation as we answer the
 394   // query, by updating entries as we cross them.
 395 
 396   // If the first object's end n is at the card boundary, start refining
 397   // with the corresponding card (the value of the entry will basically
 398   // be set to 0). If the object crosses the boundary, start from the next card.
 399   size_t n_index = _array->index_for(n);
 400   size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n);
 401   // Calculate a consistent next boundary.  If "n" is not at the boundary
 402   // already, step to the boundary.
 403   HeapWord* next_boundary = _array->address_for_index(n_index) +
 404                             (n_index == next_index ? 0 : N_words);
 405   assert(next_boundary <= _array->_end,
 406          err_msg("next_boundary is beyond the end of the covered region "
 407                  " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
 408                  next_boundary, _array->_end));
 409   if (csp() != NULL) {
 410     if (addr >= csp()->top()) return csp()->top();
 411     while (next_boundary < addr) {
 412       while (n <= next_boundary) {
 413         q = n;
 414         oop obj = oop(q);
 415         if (obj->klass_or_null() == NULL) return q;
 416         n += obj->size();
 417       }
 418       assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
 419       // [q, n) is the block that crosses the boundary.
 420       alloc_block_work2(&next_boundary, &next_index, q, n);
 421     }
 422   } else {
 423     while (next_boundary < addr) {
 424       while (n <= next_boundary) {
 425         q = n;
 426         oop obj = oop(q);
 427         if (obj->klass_or_null() == NULL) return q;
 428         n += _sp->block_size(q);
 429       }
 430       assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
 431       // [q, n) is the block that crosses the boundary.
 432       alloc_block_work2(&next_boundary, &next_index, q, n);
 433     }
 434   }
 435   return forward_to_block_containing_addr_const(q, n, addr);
 436 }
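
The shape of this slow path is easier to see in a toy model. A self-contained
sketch (names and flat offsets assumed; the real table stores logarithmic
backskip entries): each "object" stores its size at its first word, and the
per-card table is repaired as the walk crosses card boundaries, which is the
LAB-split fix-up described in the comment at the top of the function:

    #include <cstddef>
    #include <vector>

    const size_t kCardWords = 64;                  // assumed card geometry

    // heap[i] = size in words of the object starting at word i (toy encoding).
    // table[c] = words from card c's boundary back to the block covering it.
    size_t block_containing(const std::vector<size_t>& heap,
                            std::vector<size_t>& table, size_t addr) {
      size_t card = addr / kCardWords;
      size_t q = card * kCardWords - table[card];  // block at or preceding card
      size_t n = q + heap[q];
      while (n <= addr) {                          // walk object by object
        q = n;
        n = q + heap[q];
        // Repair every card whose boundary falls inside [q, n) -- the lazy
        // fix-up that alloc_block_work2() performs in the real code.
        for (size_t c = (q + kCardWords - 1) / kCardWords;
             c * kCardWords < n; ++c) {
          table[c] = c * kCardWords - q;
        }
      }
      return q;                                    // addr lies in [q, n)
    }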
 437 
 438 HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
 439   assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
 440 
 441   assert(_bottom <= addr && addr < _end,
 442          "addr must be covered by this Array");
 443   // Must read this exactly once because it can be modified by parallel
 444   // allocation.
 445   HeapWord* ub = _unallocated_block;
 446   if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
 447     assert(ub < _end, "tautology (see above)");
 448     return ub;
 449   }
 450 
 451   // Otherwise, find the block start using the table, but taking
 452   // care (cf block_start_unsafe() above) not to parse any objects/blocks
 453   // on the cards themselves.
 454   size_t index = _array->index_for(addr);


 620 #endif // !PRODUCT
 621 
 622 //////////////////////////////////////////////////////////////////////
 623 // G1BlockOffsetArrayContigSpace
 624 //////////////////////////////////////////////////////////////////////
 625 
 626 HeapWord*
 627 G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) {
 628   assert(_bottom <= addr && addr < _end,
 629          "addr must be covered by this Array");
 630   HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
 631   return forward_to_block_containing_addr(q, addr);
 632 }
 633 
 634 HeapWord*
 635 G1BlockOffsetArrayContigSpace::
 636 block_start_unsafe_const(const void* addr) const {
 637   assert(_bottom <= addr && addr < _end,
 638          "addr must be covered by this Array");
 639   HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
 640   HeapWord* n = q + _sp->block_size(q);
 641   return forward_to_block_containing_addr_const(q, n, addr);
 642 }
 643 
 644 G1BlockOffsetArrayContigSpace::
 645 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
 646                               MemRegion mr) :
 647   G1BlockOffsetArray(array, mr, true)
 648 {
 649   _next_offset_threshold = NULL;
 650   _next_offset_index = 0;
 651 }
 652 
 653 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
 654   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
 655          "just checking");
 656   _next_offset_index = _array->index_for(_bottom);
 657   _next_offset_index++;
 658   _next_offset_threshold =
 659     _array->address_for_index(_next_offset_index);
 660   return _next_offset_threshold;




   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
  27 #include "gc_implementation/g1/heapRegion.hpp"
  28 #include "memory/space.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/java.hpp"
  31 #include "services/memTracker.hpp"
  32 
  33 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  34 
  35 //////////////////////////////////////////////////////////////////////
  36 // G1BlockOffsetSharedArray
  37 //////////////////////////////////////////////////////////////////////
  38 
  39 G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
  40                                                    size_t init_word_size) :
  41   _reserved(reserved), _end(NULL)
  42 {
  43   size_t size = compute_size(reserved.word_size());
  44   ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
  45   if (!rs.is_reserved()) {
  46     vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  47   }


  91     _vs.shrink_by(delta);
  92     assert(_vs.high() == high - delta, "invalid expansion");
  93   }
  94 }
  95 
  96 bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  97   assert(p >= _reserved.start(), "just checking");
  98   size_t delta = pointer_delta(p, _reserved.start());
  99   return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
 100 }
 101 
 102 
 103 //////////////////////////////////////////////////////////////////////
 104 // G1BlockOffsetArray
 105 //////////////////////////////////////////////////////////////////////
 106 
 107 G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
 108                                        MemRegion mr, bool init_to_zero) :
 109   G1BlockOffsetTable(mr.start(), mr.end()),
 110   _unallocated_block(_bottom),
 111   _array(array), _gsp(NULL),
 112   _init_to_zero(init_to_zero) {
 113   assert(_bottom <= _end, "arguments out of order");
 114   if (!_init_to_zero) {
 115     // initialize cards to point back to mr.start()
 116     set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
 117     _array->set_offset_array(0, 0);  // set first card to 0
 118   }
 119 }
 120 
 121 void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) {
 122   _gsp = sp;
 123 }
 124 
 125 // The arguments follow the normal convention of denoting
 126 // a right-open interval: [start, end)
 127 void
 128 G1BlockOffsetArray::set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {
 129 
 130   if (start >= end) {
 131     // The start address is equal to the end address (or to
 132     // the right of the end address) so there are no cards
 133     // that need to be updated.
 134     return;
 135   }
 136 
 137   // Write the backskip value for each region.
 138   //
 139   //    offset
 140   //    card             2nd                       3rd
 141   //     | +- 1st        |                         |
 142   //     v v             v                         v


 361   }
 362   // Otherwise, find the block start using the table.
 363   HeapWord* q = block_at_or_preceding(addr, false, 0);
 364   return forward_to_block_containing_addr(q, addr);
 365 }
 366 
 367 // This duplicates a little code from the above: unavoidable.
 368 HeapWord*
 369 G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
 370   assert(_bottom <= addr && addr < _end,
 371          "addr must be covered by this Array");
 372   // Must read this exactly once because it can be modified by parallel
 373   // allocation.
 374   HeapWord* ub = _unallocated_block;
 375   if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
 376     assert(ub < _end, "tautology (see above)");
 377     return ub;
 378   }
 379   // Otherwise, find the block start using the table.
 380   HeapWord* q = block_at_or_preceding(addr, false, 0);
 381   HeapWord* n = q + block_size(q);
 382   return forward_to_block_containing_addr_const(q, n, addr);
 383 }
 384 
 385 
 386 HeapWord*
 387 G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
 388                                                           HeapWord* n,
 389                                                           const void* addr) {
 390   // We're not in the normal case.  We need to handle an important subcase
 391   // here: LAB allocation.  An allocation previously recorded in the
 392   // offset table was actually a lab allocation, and was divided into
 393   // several objects subsequently.  Fix this situation as we answer the
 394   // query, by updating entries as we cross them.
 395 
 396   // If the first object's end n is at the card boundary, start refining
 397   // with the corresponding card (the value of the entry will basically
 398   // be set to 0). If the object crosses the boundary, start from the next card.
 399   size_t n_index = _array->index_for(n);
 400   size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n);
 401   // Calculate a consistent next boundary.  If "n" is not at the boundary
 402   // already, step to the boundary.
 403   HeapWord* next_boundary = _array->address_for_index(n_index) +
 404                             (n_index == next_index ? 0 : N_words);
 405   assert(next_boundary <= _array->_end,
 406          err_msg("next_boundary is beyond the end of the covered region "
 407                  " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
 408                  next_boundary, _array->_end));
 409   if (addr >= gsp()->top()) return gsp()->top();
 410   while (next_boundary < addr) {
 411     while (n <= next_boundary) {
 412       q = n;
 413       oop obj = oop(q);
 414       if (obj->klass_or_null() == NULL) return q;
 415       n += block_size(q);
 416     }
 417     assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
 418     // [q, n) is the block that crosses the boundary.
 419     alloc_block_work2(&next_boundary, &next_index, q, n);
 420   }
 421   return forward_to_block_containing_addr_const(q, n, addr);
 422 }
 423 
 424 HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
 425   assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
 426 
 427   assert(_bottom <= addr && addr < _end,
 428          "addr must be covered by this Array");
 429   // Must read this exactly once because it can be modified by parallel
 430   // allocation.
 431   HeapWord* ub = _unallocated_block;
 432   if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
 433     assert(ub < _end, "tautology (see above)");
 434     return ub;
 435   }
 436 
 437   // Otherwise, find the block start using the table, but taking
 438   // care (cf block_start_unsafe() above) not to parse any objects/blocks
 439   // on the cards themselves.
 440   size_t index = _array->index_for(addr);


 606 #endif // !PRODUCT
 607 
 608 //////////////////////////////////////////////////////////////////////
 609 // G1BlockOffsetArrayContigSpace
 610 //////////////////////////////////////////////////////////////////////
 611 
 612 HeapWord*
 613 G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) {
 614   assert(_bottom <= addr && addr < _end,
 615          "addr must be covered by this Array");
 616   HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
 617   return forward_to_block_containing_addr(q, addr);
 618 }
 619 
 620 HeapWord*
 621 G1BlockOffsetArrayContigSpace::
 622 block_start_unsafe_const(const void* addr) const {
 623   assert(_bottom <= addr && addr < _end,
 624          "addr must be covered by this Array");
 625   HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
 626   HeapWord* n = q + block_size(q);
 627   return forward_to_block_containing_addr_const(q, n, addr);
 628 }
 629 
 630 G1BlockOffsetArrayContigSpace::
 631 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
 632                               MemRegion mr) :
 633   G1BlockOffsetArray(array, mr, true)
 634 {
 635   _next_offset_threshold = NULL;
 636   _next_offset_index = 0;
 637 }
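
Before initialize_threshold() below, a worked version of its arithmetic
(helper names and byte-based geometry assumed): the first threshold is the
card boundary strictly above _bottom, so allocations below it need no table
update at all:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    const size_t kLogCardBytes = 9;              // assumed 512-byte cards

    size_t index_for(uintptr_t p, uintptr_t base) {
      return (p - base) >> kLogCardBytes;        // card containing address p
    }
    uintptr_t address_for_index(size_t i, uintptr_t base) {
      return base + (i << kLogCardBytes);        // first byte of card i
    }

    // Mirrors initialize_threshold(): index one past bottom's card; the
    // threshold is that card's starting address.
    uintptr_t initial_threshold(uintptr_t bottom, uintptr_t base) {
      return address_for_index(index_for(bottom, base) + 1, base);
    }

    int main() {
      // bottom 100 bytes into the region -> threshold at the next card (512).
      assert(initial_threshold(0x10000 + 100, 0x10000) == 0x10000 + 512);
    }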
 638 
 639 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
 640   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
 641          "just checking");
 642   _next_offset_index = _array->index_for(_bottom);
 643   _next_offset_index++;
 644   _next_offset_threshold =
 645     _array->address_for_index(_next_offset_index);
 646   return _next_offset_threshold;