src/hotspot/share/gc/shared/cardTable.cpp

rev 58025 : imported patch 8238854-remove-superfluous-alloc-checks
rev 58026 : [mq]: 8238854-sjohanss-review


  44 CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
  45   _scanned_concurrently(conc_scan),
  46   _whole_heap(whole_heap),
  47   _guard_index(0),
  48   _last_valid_index(0),
  49   _page_size(os::vm_page_size()),
  50   _byte_map_size(0),
  51   _byte_map(NULL),
  52   _byte_map_base(NULL),
  53   _cur_covered_regions(0),
  54   _covered(NULL),
  55   _committed(NULL),
  56   _guard_region()
  57 {
  58   assert((uintptr_t(_whole_heap.start())  & (card_size - 1))  == 0, "heap must start at card boundary");
  59   assert((uintptr_t(_whole_heap.end()) & (card_size - 1))  == 0, "heap must end at card boundary");
  60 
  61   assert(card_size <= 512, "card_size must be less than 512"); // why?
  62 
  63   _covered = new MemRegion[_max_covered_regions];
  64 }
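
For context on the allocation check that this patch treats as superfluous: a NULL test after an allocation is dead code whenever the allocation path either returns usable memory or terminates the process instead of returning NULL. A minimal standalone sketch of that pattern, using a hypothetical allocate_or_die helper rather than HotSpot's real allocation machinery:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Hypothetical exit-on-failure allocator: it either returns usable
    // memory or terminates the process, so callers can never observe NULL.
    static void* allocate_or_die(std::size_t size) {
      void* p = std::malloc(size);
      if (p == NULL) {
        std::fprintf(stderr, "out of memory allocating %zu bytes\n", size);
        std::exit(1);
      }
      return p;
    }

    int main() {
      int* covered = static_cast<int*>(allocate_or_die(16 * sizeof(int)));
      // A test such as `if (covered == NULL) ...` here can never fire,
      // which is the sense in which such a check is superfluous.
      covered[0] = 42;
      std::printf("%d\n", covered[0]);
      std::free(covered);
      return 0;
    }
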
  65 
  66 CardTable::~CardTable() {
  67   if (_covered) {
  68     delete[] _covered;
  69     _covered = NULL;
  70   }
  71   if (_committed) {
  72     delete[] _committed;
  73     _committed = NULL;
  74   }
  75 }
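
A note on the guards in this destructor: applying delete[] to a null pointer is already a well-defined no-op in standard C++, so the checks are defensive rather than required. A tiny standalone illustration:

    #include <cstddef>
    #include <cstdio>

    int main() {
      int* committed = NULL;   // nothing was ever allocated
      delete[] committed;      // well-defined no-op on a null pointer
      committed = NULL;        // mirrors the reset done above
      std::printf("deleting a null array pointer is safe\n");
      return 0;
    }
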
  76 
  77 void CardTable::initialize() {
  78   _guard_index = cards_required(_whole_heap.word_size()) - 1;
  79   _last_valid_index = _guard_index - 1;
  80 
  81   _byte_map_size = compute_byte_map_size();
  82 
  83   HeapWord* low_bound  = _whole_heap.start();
  84   HeapWord* high_bound = _whole_heap.end();
  85 
  86   _cur_covered_regions = 0;
  87   _committed = new MemRegion[_max_covered_regions];
  88 
  89   const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
  90     MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  91   ReservedSpace heap_rs(_byte_map_size, rs_align, false);
  92 
  93   MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
  94 
  95   os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
  96                        _page_size, heap_rs.base(), heap_rs.size());
  97   if (!heap_rs.is_reserved()) {
  98     vm_exit_during_initialization("Could not reserve enough space for the "
  99                                   "card marking array");
 100   }
 101 
 102   // The assembler store_check code will do an unsigned shift of the oop,
 103   // then add it to _byte_map_base, i.e.
 104   //
 105   //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
 106   _byte_map = (CardValue*) heap_rs.base();
 107   _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
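
Stepping back to the sizing at the top of initialize(): assuming cards_required() hands back one card per card_size worth of heap plus a single trailing guard card (which is how the _guard_index / _last_valid_index relationship above reads), the numbers for an illustrative heap work out as follows. All constants below are assumptions made for the sake of the example:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t HeapWordSize       = 8;                  // 64-bit heap word
      const std::size_t card_size          = 512;                // bytes covered per card
      const std::size_t card_size_in_words = card_size / HeapWordSize;

      const std::size_t heap_bytes = 256 * 1024 * 1024;          // illustrative 256M heap
      const std::size_t heap_words = heap_bytes / HeapWordSize;

      // Assumed shape of cards_required(): one card per card_size_in_words
      // of heap, plus one trailing guard card used to catch stray writes.
      const std::size_t cards_required   = heap_words / card_size_in_words + 1;
      const std::size_t guard_index      = cards_required - 1;   // as in initialize()
      const std::size_t last_valid_index = guard_index - 1;

      std::printf("cards required   = %zu\n", cards_required);   // 524289
      std::printf("guard index      = %zu\n", guard_index);      // 524288
      std::printf("last valid index = %zu\n", last_valid_index); // 524287
      return 0;
    }
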




  44 CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
  45   _scanned_concurrently(conc_scan),
  46   _whole_heap(whole_heap),
  47   _guard_index(0),
  48   _last_valid_index(0),
  49   _page_size(os::vm_page_size()),
  50   _byte_map_size(0),
  51   _byte_map(NULL),
  52   _byte_map_base(NULL),
  53   _cur_covered_regions(0),
  54   _covered(NULL),
  55   _committed(NULL),
  56   _guard_region()
  57 {
  58   assert((uintptr_t(_whole_heap.start())  & (card_size - 1))  == 0, "heap must start at card boundary");
  59   assert((uintptr_t(_whole_heap.end()) & (card_size - 1))  == 0, "heap must end at card boundary");
  60 
  61   assert(card_size <= 512, "card_size must be less than 512"); // why?
  62 
  63   _covered = new MemRegion[_max_covered_regions];
  64   if (_covered == NULL) {
  65     vm_exit_during_initialization("Could not allocate card table covered region set.");
  66   }
  67 }
  68 
  69 CardTable::~CardTable() {
  70   if (_covered) {
  71     delete[] _covered;
  72     _covered = NULL;
  73   }
  74   if (_committed) {
  75     delete[] _committed;
  76     _committed = NULL;
  77   }
  78 }
  79 
  80 void CardTable::initialize() {
  81   _guard_index = cards_required(_whole_heap.word_size()) - 1;
  82   _last_valid_index = _guard_index - 1;
  83 
  84   _byte_map_size = compute_byte_map_size();
  85 
  86   HeapWord* low_bound  = _whole_heap.start();
  87   HeapWord* high_bound = _whole_heap.end();
  88 
  89   _cur_covered_regions = 0;
  90   _committed = new MemRegion[_max_covered_regions];
  91   if (_committed == NULL) {
  92     vm_exit_during_initialization("Could not allocate card table committed region set.");
  93   }
  94 
  95   const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
  96     MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  97   ReservedSpace heap_rs(_byte_map_size, rs_align, false);
  98 
  99   MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
 100 
 101   os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
 102                        _page_size, heap_rs.base(), heap_rs.size());
 103   if (!heap_rs.is_reserved()) {
 104     vm_exit_during_initialization("Could not reserve enough space for the "
 105                                   "card marking array");
 106   }
 107 
 108   // The assembler store_check code will do an unsigned shift of the oop,
 109   // then add it to _byte_map_base, i.e.
 110   //
 111   //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
 112   _byte_map = (CardValue*) heap_rs.base();
 113   _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
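
The store_check comment above describes a biased base: _byte_map_base is pre-displaced so that shifting any heap address right by card_shift yields a direct index into the card array, with the heap's low_bound landing on card 0. A small self-contained sketch of that arithmetic; the constants and names are illustrative rather than HotSpot's, and the bias is computed in integer space to keep the example portable:

    #include <cstdint>
    #include <cstdio>

    typedef uint8_t CardValue;                        // plays the role of CardTable::CardValue

    int main() {
      const unsigned  card_shift = 9;                 // log2(512), matching 512-byte cards
      const uintptr_t card_size  = uintptr_t(1) << card_shift;

      static CardValue byte_map[1024];                // stands in for the reserved card array
      const uintptr_t low_bound = 64 * 1024 * 1024;   // illustrative, card-aligned heap start

      // Pre-displaced base, exactly as the comment reads:
      //   card = byte_map_base + (addr >> card_shift)
      const uintptr_t byte_map_base = (uintptr_t)byte_map - (low_bound >> card_shift);

      uintptr_t  addr = low_bound + 5 * card_size + 40;            // an address inside card 5
      CardValue* card = (CardValue*)(byte_map_base + (addr >> card_shift));

      std::printf("card index = %td\n", card - byte_map);          // prints 5
      return 0;
    }
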

