
src/hotspot/share/gc/shared/cardGeneration.cpp

rev 58025 : imported patch 8238854-remove-superfluous-alloc-checks
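Context for the change, as the patch name suggests: BlockOffsetSharedArray is a CHeapObj-derived type, and HotSpot's CHeapObj::operator new exits the VM on allocation failure rather than returning NULL, so the `_bts == NULL` check below can never fire. A minimal sketch of that pattern follows; ExitOnOomObj and its exit(1) call are illustrative stand-ins, not HotSpot code:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Illustrative stand-in for a HotSpot CHeapObj-style class: operator new
// either returns usable memory or terminates the process, so callers can
// never observe a NULL result and post-allocation NULL checks are dead code.
class ExitOnOomObj {
 public:
  void* operator new(std::size_t size) {
    void* p = std::malloc(size);
    if (p == NULL) {
      std::fprintf(stderr, "out of native memory\n");
      std::exit(1);  // stands in for vm_exit_out_of_memory()
    }
    return p;
  }
  void operator delete(void* p) { std::free(p); }
};

Under this pattern, `new` either succeeds or never returns, which is why the `_bts == NULL` branch in the old code was unreachable.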

Before the patch:
  36 #include "memory/memRegion.hpp"
  37 #include "logging/log.hpp"
  38 #include "runtime/java.hpp"
  39 
  40 CardGeneration::CardGeneration(ReservedSpace rs,
  41                                size_t initial_byte_size,
  42                                CardTableRS* remset) :
  43   Generation(rs, initial_byte_size), _rs(remset),
  44   _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  45   _used_at_prologue()
  46 {
  47   HeapWord* start = (HeapWord*)rs.base();
  48   size_t reserved_byte_size = rs.size();
  49   assert((uintptr_t(start) & 3) == 0, "bad alignment");
  50   assert((reserved_byte_size & 3) == 0, "bad alignment");
  51   MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  52   _bts = new BlockOffsetSharedArray(reserved_mr,
  53                                     heap_word_size(initial_byte_size));
  54   MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  55   _rs->resize_covered_region(committed_mr);
  56   if (_bts == NULL) {
  57     vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
  58   }
  59 
  60   // Verify that the start and end of this generation are card aligned.
  61   // If this weren't true, a single card could span more than one generation,
  62   // which would cause problems when we commit/uncommit memory, and when we
  63   // clear and dirty cards.
  64   guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  65   if (reserved_mr.end() != GenCollectedHeap::heap()->reserved_region().end()) {
  66     // Don't check at the very end of the heap as we'll assert that we're probing off
  67     // the end if we try.
  68     guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  69   }
  70   _min_heap_delta_bytes = MinHeapDeltaBytes;
  71   _capacity_at_prologue = initial_byte_size;
  72   _used_at_prologue = 0;
  73 }
  74 
  75 bool CardGeneration::grow_by(size_t bytes) {
  76   assert_correct_size_change_locking();
  77   bool result = _virtual_space.expand_by(bytes);
  78   if (result) {

After the patch:

  36 #include "memory/memRegion.hpp"
  37 #include "logging/log.hpp"
  38 #include "runtime/java.hpp"
  39 
  40 CardGeneration::CardGeneration(ReservedSpace rs,
  41                                size_t initial_byte_size,
  42                                CardTableRS* remset) :
  43   Generation(rs, initial_byte_size), _rs(remset),
  44   _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  45   _used_at_prologue()
  46 {
  47   HeapWord* start = (HeapWord*)rs.base();
  48   size_t reserved_byte_size = rs.size();
  49   assert((uintptr_t(start) & 3) == 0, "bad alignment");
  50   assert((reserved_byte_size & 3) == 0, "bad alignment");
  51   MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  52   _bts = new BlockOffsetSharedArray(reserved_mr,
  53                                     heap_word_size(initial_byte_size));
  54   MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  55   _rs->resize_covered_region(committed_mr);
  56 
  57   // Verify that the start and end of this generation are card aligned.
  58   // If this weren't true, a single card could span more than one generation,
  59   // which would cause problems when we commit/uncommit memory, and when we
  60   // clear and dirty cards.
  61   guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  62   if (reserved_mr.end() != GenCollectedHeap::heap()->reserved_region().end()) {
  63     // Don't check at the very end of the heap as we'll assert that we're probing off
  64     // the end if we try.
  65     guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  66   }
  67   _min_heap_delta_bytes = MinHeapDeltaBytes;
  68   _capacity_at_prologue = initial_byte_size;
  69   _used_at_prologue = 0;
  70 }
  71 
  72 bool CardGeneration::grow_by(size_t bytes) {
  73   assert_correct_size_change_locking();
  74   bool result = _virtual_space.expand_by(bytes);
  75   if (result) {
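The alignment property the guarantees above enforce can be pictured in a few lines. This sketch assumes HotSpot's default 512-byte cards (card size = 1 << 9); is_card_aligned is a hypothetical helper standing in for what the constructor asks of CardTableRS::is_aligned():

#include <cassert>
#include <cstdint>

// Assumed card size: 512 bytes, HotSpot's default.
const uintptr_t kCardSize = 512;

// Hypothetical helper: true iff p falls exactly on a card boundary.
bool is_card_aligned(const void* p) {
  return (uintptr_t(p) & (kCardSize - 1)) == 0;
}

int main() {
  // If a generation boundary fell mid-card, dirtying that card for an
  // object in one generation would also mark memory belonging to its
  // neighbor, confusing commit/uncommit and card clear/dirty operations.
  assert(is_card_aligned((void*)0x40000000));   // on a card boundary
  assert(!is_card_aligned((void*)0x40000040));  // mid-card address
  return 0;
}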

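One detail worth noting in the constructor: reserved_mr covers the whole reservation while committed_mr covers only initial_byte_size, and only the committed part is handed to the remembered set via resize_covered_region(). A small sketch of the byte-to-word arithmetic, with hypothetical sizes and the assumptions that HeapWords are 8 bytes and that heap_word_size() rounds up to whole words:

#include <cstddef>
#include <cstdio>

const std::size_t HeapWordSize = 8;  // assumption: 64-bit VM

// bytes -> HeapWords, rounding up (assumed behavior of HotSpot's
// heap_word_size(); the sizes below are hypothetical)
std::size_t heap_word_size(std::size_t byte_size) {
  return (byte_size + HeapWordSize - 1) / HeapWordSize;
}

int main() {
  const std::size_t reserved_byte_size = 64 * 1024 * 1024;  // hypothetical reservation
  const std::size_t initial_byte_size  =  4 * 1024 * 1024;  // hypothetical initial commit
  // reserved_mr would span all reserved words; committed_mr only the
  // initially committed prefix, which the remembered set must cover.
  std::printf("reserved words:  %zu\n", heap_word_size(reserved_byte_size));
  std::printf("committed words: %zu\n", heap_word_size(initial_byte_size));
  return 0;
}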
