src/share/vm/gc_implementation/g1/concurrentMark.cpp

rev 8048 : 8058354: SPECjvm2008-Derby -2.7% performance regression on Solaris-X64 starting with 9-b29
Summary: Allow partial use of large pages for auxiliary data structures in G1.
Reviewed-by: jmasa

Old version:

  98 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  99   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
 100   return (int) (diff >> _shifter);
 101 }
 102 
 103 #ifndef PRODUCT
 104 bool CMBitMapRO::covers(MemRegion heap_rs) const {
 105   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
 106   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
 107          "size inconsistency");
 108   return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
 109          _bmWordSize  == heap_rs.word_size();
 110 }
 111 #endif
 112 
 113 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
 114   _bm.print_on_error(st, prefix);
 115 }
 116 
 117 size_t CMBitMap::compute_size(size_t heap_size) {
 118   return heap_size / mark_distance();
 119 }
 120 
 121 size_t CMBitMap::mark_distance() {
 122   return MinObjAlignmentInBytes * BitsPerByte;
 123 }
 124 
 125 void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
 126   _bmStartWord = heap.start();
 127   _bmWordSize = heap.word_size();
 128 
 129   _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
 130   _bm.set_size(_bmWordSize >> _shifter);
 131 
 132   storage->set_mapping_changed_listener(&_listener);
 133 }
 134 
 135 void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
 136   if (zero_filled) {
 137     return;
 138   }

New version (rev 8048):

  98 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  99   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
 100   return (int) (diff >> _shifter);
 101 }
 102 
 103 #ifndef PRODUCT
 104 bool CMBitMapRO::covers(MemRegion heap_rs) const {
 105   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
 106   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
 107          "size inconsistency");
 108   return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
 109          _bmWordSize  == heap_rs.word_size();
 110 }
 111 #endif
 112 
 113 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
 114   _bm.print_on_error(st, prefix);
 115 }
 116 
 117 size_t CMBitMap::compute_size(size_t heap_size) {
 118   return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
 119 }
 120 
 121 size_t CMBitMap::mark_distance() {
 122   return MinObjAlignmentInBytes * BitsPerByte;
 123 }
 124 
 125 void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
 126   _bmStartWord = heap.start();
 127   _bmWordSize = heap.word_size();
 128 
 129   _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
 130   _bm.set_size(_bmWordSize >> _shifter);
 131 
 132   storage->set_mapping_changed_listener(&_listener);
 133 }
 134 
 135 void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
 136   if (zero_filled) {
 137     return;
 138   }


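The only change in this hunk is at line 118: CMBitMap::compute_size() now rounds the bitmap size up with ReservedSpace::allocation_align_size_up() instead of returning the raw quotient heap_size / mark_distance(). The standalone sketch below illustrates that arithmetic only; the 64-byte mark distance (MinObjAlignmentInBytes * BitsPerByte with 8-byte object alignment) and the 4 KiB allocation granularity are illustrative assumptions, not values taken from this patch.

#include <cstddef>
#include <cstdio>

static const size_t kMinObjAlignmentInBytes = 8;    // assumed: 8-byte object alignment
static const size_t kBitsPerByte            = 8;
static const size_t kAllocGranularity       = 4096; // assumed: 4 KiB allocation granularity

// One bitmap bit covers mark_distance() bytes of heap (64 with the values above).
static size_t mark_distance() {
  return kMinObjAlignmentInBytes * kBitsPerByte;
}

// Round x up to the next multiple of a power-of-two alignment.
static size_t align_size_up(size_t x, size_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

// Old line 118: exact bitmap size.
static size_t compute_size_old(size_t heap_size) {
  return heap_size / mark_distance();
}

// New line 118: bitmap size rounded up to the allocation granularity.
static size_t compute_size_new(size_t heap_size) {
  return align_size_up(heap_size / mark_distance(), kAllocGranularity);
}

int main() {
  const size_t heap_size = 100 * 1000 * 1000;  // 100 MB heap, chosen so the two results differ
  // prints: old: 1562500 bytes, new: 1564672 bytes
  std::printf("old: %zu bytes, new: %zu bytes\n",
              compute_size_old(heap_size), compute_size_new(heap_size));
  return 0;
}

Padding the size to a whole number of allocation-granularity units means the mapper backing the bitmap can commit its storage in complete chunks, which appears to be in line with the summary's goal of allowing partial use of large pages for G1's auxiliary data structures.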