
src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp

rev 8069 : imported patch g1_cleanup


   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"

  26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
  27 #include "gc_implementation/g1/heapRegion.hpp"
  28 #include "memory/space.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/java.hpp"
  31 #include "services/memTracker.hpp"
  32 
  33 
  34 
  35 //////////////////////////////////////////////////////////////////////
  36 // G1BlockOffsetSharedArray
  37 //////////////////////////////////////////////////////////////////////
  38 
  39 G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :
  40   _reserved(), _end(NULL), _listener(), _offset_array(NULL) {
  41 
  42   _reserved = heap;
  43   _end = NULL;
  44 
  45   MemRegion bot_reserved = storage->reserved();


 286 //              |   _index_
 287 //              v   v
 288 //      +-------+-------+-------+-------+-------+
 289 //      | i-1   |   i   | i+1   | i+2   | i+3   |
 290 //      +-------+-------+-------+-------+-------+
 291 //       ( ^    ]
 292 //         block-start
 293 //
 294 void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_,
 295                                            HeapWord* blk_start, HeapWord* blk_end) {
 296   // For efficiency, do copy-in/copy-out.
 297   HeapWord* threshold = *threshold_;
 298   size_t    index = *index_;
 299 
 300   assert(blk_start != NULL && blk_end > blk_start,
 301          "phantom block");
 302   assert(blk_end > threshold, "should be past threshold");
 303   assert(blk_start <= threshold, "blk_start should be at or before threshold");
 304   assert(pointer_delta(threshold, blk_start) <= N_words,
 305          "offset should be <= BlockOffsetSharedArray::N");
 306   assert(Universe::heap()->is_in_reserved(blk_start),
 307          "reference must be into the heap");
 308   assert(Universe::heap()->is_in_reserved(blk_end-1),
 309          "limit must be within the heap");
 310   assert(threshold == _array->_reserved.start() + index*N_words,
 311          "index must agree with threshold");
 312 
 313   DEBUG_ONLY(size_t orig_index = index;)
 314 
 315   // Mark the card that holds the offset into the block.  Note
 316   // that _next_offset_index and _next_offset_threshold are not
 317   // updated until the end of this method.
 318   _array->set_offset_array(index, threshold, blk_start);
 319 
 320   // We need to now mark the subsequent cards that this blk spans.
 321 
 322   // Index of card on which blk ends.
 323   size_t end_index   = _array->index_for(blk_end - 1);
 324 
 325   // Are there more cards left to be updated?
 326   if (index + 1 <= end_index) {
 327     HeapWord* rem_st  = _array->address_for_index(index + 1);
 328     // Calculate rem_end this way because end_index


 441 HeapWord*
 442 G1BlockOffsetArrayContigSpace::
 443 block_start_unsafe_const(const void* addr) const {
 444   assert(_bottom <= addr && addr < _end,
 445          "addr must be covered by this Array");
 446   HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
 447   HeapWord* n = q + block_size(q);
 448   return forward_to_block_containing_addr_const(q, n, addr);
 449 }
 450 
 451 G1BlockOffsetArrayContigSpace::
 452 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
 453                               MemRegion mr) :
 454   G1BlockOffsetArray(array, mr)
 455 {
 456   _next_offset_threshold = NULL;
 457   _next_offset_index = 0;
 458 }
 459 
 460 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
 461   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
 462          "just checking");
 463   _next_offset_index = _array->index_for_raw(_bottom);
 464   _next_offset_index++;
 465   _next_offset_threshold =
 466     _array->address_for_index_raw(_next_offset_index);
 467   return _next_offset_threshold;
 468 }
 469 
 470 void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
 471   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
 472          "just checking");
 473   size_t bottom_index = _array->index_for_raw(_bottom);
 474   assert(_array->address_for_index_raw(bottom_index) == _bottom,
 475          "Precondition of call");
 476   _array->set_offset_array_raw(bottom_index, 0);
 477 }
 478 
 479 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
 480   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
 481          "just checking");
 482   _next_offset_index = _array->index_for(_bottom);
 483   _next_offset_index++;
 484   _next_offset_threshold =
 485     _array->address_for_index(_next_offset_index);
 486   return _next_offset_threshold;
 487 }
 488 
 489 void
 490 G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
 491   assert(new_top <= _end, "_end should have already been updated");
 492 
 493   // The first BOT entry should have offset 0.
 494   reset_bot();
 495   alloc_block(_bottom, new_top);
 496  }
 497 
 498 #ifndef PRODUCT
 499 void
 500 G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {


   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/g1CollectedHeap.hpp"
  27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
  28 #include "gc_implementation/g1/heapRegion.hpp"
  29 #include "memory/space.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/java.hpp"
  32 #include "services/memTracker.hpp"
  33 
  34 
  35 
  36 //////////////////////////////////////////////////////////////////////
  37 // G1BlockOffsetSharedArray
  38 //////////////////////////////////////////////////////////////////////
  39 
  40 G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :
  41   _reserved(), _end(NULL), _listener(), _offset_array(NULL) {
  42 
  43   _reserved = heap;
  44   _end = NULL;
  45 
  46   MemRegion bot_reserved = storage->reserved();
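
The constructor excerpt above maps the shared block offset table onto storage reserved through the G1RegionToSpaceMapper. As a rough, illustrative sketch of the sizing this implies (the 512-byte card size and the helper name below are assumptions for illustration, not part of this file):

  // Illustrative sketch only: one one-byte BOT entry per card of the
  // reserved heap, assuming the usual G1 card size of 512 bytes
  // (64 HeapWords on a 64-bit VM). bot_entry_count is a hypothetical helper.
  #include <cstddef>

  size_t bot_entry_count(size_t heap_size_in_words, size_t card_size_in_words = 64) {
    return heap_size_in_words / card_size_in_words;
  }

  // Example: a 4 GB heap is 512M words; 512M / 64 words per card gives
  // 8M entries, i.e. roughly an 8 MB offset array.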


 287 //              |   _index_
 288 //              v   v
 289 //      +-------+-------+-------+-------+-------+
 290 //      | i-1   |   i   | i+1   | i+2   | i+3   |
 291 //      +-------+-------+-------+-------+-------+
 292 //       ( ^    ]
 293 //         block-start
 294 //
 295 void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_,
 296                                            HeapWord* blk_start, HeapWord* blk_end) {
 297   // For efficiency, do copy-in/copy-out.
 298   HeapWord* threshold = *threshold_;
 299   size_t    index = *index_;
 300 
 301   assert(blk_start != NULL && blk_end > blk_start,
 302          "phantom block");
 303   assert(blk_end > threshold, "should be past threshold");
 304   assert(blk_start <= threshold, "blk_start should be at or before threshold");
 305   assert(pointer_delta(threshold, blk_start) <= N_words,
 306          "offset should be <= BlockOffsetSharedArray::N");
 307   assert(G1CollectedHeap::heap()->is_in_reserved(blk_start),
 308          "reference must be into the heap");
 309   assert(G1CollectedHeap::heap()->is_in_reserved(blk_end-1),
 310          "limit must be within the heap");
 311   assert(threshold == _array->_reserved.start() + index*N_words,
 312          "index must agree with threshold");
 313 
 314   DEBUG_ONLY(size_t orig_index = index;)
 315 
 316   // Mark the card that holds the offset into the block.  Note
 317   // that _next_offset_index and _next_offset_threshold are not
 318   // updated until the end of this method.
 319   _array->set_offset_array(index, threshold, blk_start);
 320 
 321   // We need to now mark the subsequent cards that this blk spans.
 322 
 323   // Index of card on which blk ends.
 324   size_t end_index   = _array->index_for(blk_end - 1);
 325 
 326   // Are there more cards left to be updated?
 327   if (index + 1 <= end_index) {
 328     HeapWord* rem_st  = _array->address_for_index(index + 1);
 329     // Calculate rem_end this way because end_index
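
The diagram and the asserts above pin down the card arithmetic this method relies on: the threshold is always _reserved.start() + index * N_words, and end_index is the card holding the last word of the block. A minimal sketch of that arithmetic follows, with an assumed card size of 64 words; index_for and address_for_index here are illustrative stand-ins, not the real G1BlockOffsetSharedArray members.

  // Sketch of the card index <-> address arithmetic, assuming 64-word cards;
  // not the actual G1BlockOffsetSharedArray implementation.
  #include <cassert>
  #include <cstddef>

  static const size_t kNWords = 64;              // assumed card size in HeapWords

  size_t index_for(size_t reserved_start, size_t addr) {
    return (addr - reserved_start) / kNWords;    // card containing addr
  }

  size_t address_for_index(size_t reserved_start, size_t index) {
    return reserved_start + index * kNWords;     // first word covered by that card
  }

  int main() {
    size_t start = 0;
    size_t blk_start = 60, blk_end = 200;        // word addresses; block spans cards 0..3
    assert(index_for(start, blk_start) == 0);    // block starts in card 0
    size_t end_index = index_for(start, blk_end - 1);
    assert(end_index == 3);                      // card holding the last word of the block
    assert(address_for_index(start, end_index) == 192);
    // Cards index+1 .. end_index are the "subsequent cards" the block spans,
    // which the real code fills with back-skip entries.
    return 0;
  }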


 442 HeapWord*
 443 G1BlockOffsetArrayContigSpace::
 444 block_start_unsafe_const(const void* addr) const {
 445   assert(_bottom <= addr && addr < _end,
 446          "addr must be covered by this Array");
 447   HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
 448   HeapWord* n = q + block_size(q);
 449   return forward_to_block_containing_addr_const(q, n, addr);
 450 }
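
block_start_unsafe_const shows the usual BOT lookup pattern: jump to the block start recorded at or before addr's card, then walk forward block by block. A minimal sketch of the forwarding step (block_size_words is a hypothetical stand-in for block_size(q), not the real API):

  // Sketch of the loop behind forward_to_block_containing_addr_const; illustrative only.
  #include <cstddef>

  typedef unsigned long Addr;                    // word-granularity address stand-in

  Addr forward_to_containing_block(Addr q, Addr addr,
                                   size_t (*block_size_words)(Addr)) {
    // Invariant: q is a block start at or before addr.
    Addr n = q + block_size_words(q);
    while (n <= addr) {                          // addr not in [q, n): advance one block
      q = n;
      n = q + block_size_words(q);
    }
    return q;                                    // block containing addr starts here
  }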
 451 
 452 G1BlockOffsetArrayContigSpace::
 453 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
 454                               MemRegion mr) :
 455   G1BlockOffsetArray(array, mr)
 456 {
 457   _next_offset_threshold = NULL;
 458   _next_offset_index = 0;
 459 }
 460 
 461 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
 462   assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
 463          "just checking");
 464   _next_offset_index = _array->index_for_raw(_bottom);
 465   _next_offset_index++;
 466   _next_offset_threshold =
 467     _array->address_for_index_raw(_next_offset_index);
 468   return _next_offset_threshold;
 469 }
 470 
 471 void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
 472   assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
 473          "just checking");
 474   size_t bottom_index = _array->index_for_raw(_bottom);
 475   assert(_array->address_for_index_raw(bottom_index) == _bottom,
 476          "Precondition of call");
 477   _array->set_offset_array_raw(bottom_index, 0);
 478 }
 479 
 480 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
 481   assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
 482          "just checking");
 483   _next_offset_index = _array->index_for(_bottom);
 484   _next_offset_index++;
 485   _next_offset_threshold =
 486     _array->address_for_index(_next_offset_index);
 487   return _next_offset_threshold;
 488 }
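
Both initialize_threshold variants set the next index to the card after the one containing _bottom and derive the threshold from it, which is exactly the invariant alloc_block_work2 asserts (threshold == _reserved.start() + index * N_words). A small worked example, assuming 64-word cards and a card-aligned _bottom (the precondition zero_bottom_entry_raw checks); all values are illustrative:

  // Worked example of the threshold/index relationship; values are assumptions.
  #include <cassert>
  #include <cstddef>

  static const size_t kNWords = 64;              // assumed card size in HeapWords

  int main() {
    size_t reserved_start = 0;                   // word address of the reserved base
    size_t bottom = 3 * kNWords;                 // card-aligned region bottom (card 3)
    size_t next_index = (bottom - reserved_start) / kNWords + 1;   // index_for(bottom) + 1
    size_t next_threshold = reserved_start + next_index * kNWords; // address_for_index(next_index)
    assert(next_index == 4);
    assert(next_threshold == 4 * kNWords);
    // The first block whose end crosses next_threshold triggers
    // alloc_block_work2 with exactly this (threshold, index) pair.
    return 0;
  }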
 489 
 490 void
 491 G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
 492   assert(new_top <= _end, "_end should have already been updated");
 493 
 494   // The first BOT entry should have offset 0.
 495   reset_bot();
 496   alloc_block(_bottom, new_top);
 497  }
 498 
 499 #ifndef PRODUCT
 500 void
 501 G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {