src/share/vm/memory/cardTableModRefBS.cpp

rev 6804 : imported patch commit-uncommit-within-heap
rev 6806 : [mq]: bengt-suggestions

Old file contents (before the patch):

#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}
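
A worked example of the arithmetic, as illustration only (assumed values: the usual 512-byte cards, so card_size_in_words == 64 on a 64-bit VM):

  // Illustration, not part of the patch: a 1 GiB heap is 2^27 HeapWords,
  // already 64-word aligned, so cards_required yields one byte per card
  // plus the trailing guard card.
  const size_t covered_words = (size_t)1 << 27;
  const size_t cards = covered_words / 64 + 1;   // == 2097153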

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}
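
Continuing the same illustration with an assumed 4 KiB page size (and an allocation granularity no larger than that), the byte map rounds up to whole pages:

  // Illustration: 2097153 card bytes round up to 2101248, i.e. the 2 MiB
  // map plus one extra committed page that will hold the guard card.
  const size_t map_bytes = align_size_up((size_t)2097153, (size_t)4096);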

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be less than or equal to 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL) {
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  }

  _cur_covered_regions = 0;
  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL) {
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  }
  for (int i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}
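
The byte_map_base bias set up above is what lets the generated store check index the map without first subtracting the heap base. A minimal C++ sketch of the equivalent barrier, assuming card_shift and dirty_card_val() from cardTableModRefBS.hpp (post_write_barrier is a hypothetical name, not a function in this file):

  // Hypothetical sketch: shift the updated field's address and index
  // byte_map_base directly; no base adjustment is needed at store time.
  static inline void post_write_barrier(jbyte* byte_map_base, void* field) {
    byte_map_base[uintptr_t(field) >> CardTableModRefBS::card_shift] =
        (jbyte) CardTableModRefBS::dirty_card_val();
  }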

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {

New file contents (after the patch):
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL),
  // LNC functionality
  _lowest_non_clean(NULL),
  _lowest_non_clean_chunk_size(NULL),
  _lowest_non_clean_base_chunk_index(NULL),
  _last_LNC_resizing_collection(NULL)
{
  _kind = BarrierSet::CardTableModRef;

  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end())   & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be less than or equal to 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}
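
With the patch, the constructor only records the covered range and defaults the rest; all reservation and committing work moves to initialize(). A hedged sketch of the intended two-phase protocol (the actual call site lives in the collector setup code, outside this file):

  // Hypothetical call site: construct first, then initialize once the
  // heap's reserved range is final. Using the barrier before
  // initialize() would dereference the still-NULL _byte_map.
  CardTableModRefBS* bs = new CardTableModRefBS(whole_heap, max_covered_regions);
  bs->initialize();   // reserves the byte map and commits the guard card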

void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL) {
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  }
  for (int i = 0; i < _max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::initialize: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}
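
Because the constructor asserts that _whole_heap is card-aligned, the card index for the first word past high_bound is exactly _guard_index. The following illustration (not code from the patch) shows why eagerly committing that page makes an off-by-one store check land on mapped memory holding last_card instead of faulting:

  // Illustration: one card past the covered range is the guard card.
  jbyte* one_past_end = byte_map_base + (uintptr_t(high_bound) >> card_shift);
  assert(one_past_end == &_byte_map[_guard_index], "one past the end is the guard card");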

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {