< prev index next >

src/share/vm/gc/g1/g1RegionToSpaceMapper.cpp

Print this page
rev 11972 : [mq]: 8157952-sangheon-review


  49 // G1RegionToSpaceMapper implementation where the region granularity is larger than
  50 // or the same as the commit granularity.
  51 // Basically, the space corresponding to one region spans several OS pages.
  52 class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
  53  private:
     // Number of commit-granularity pages covered by a single region
     // (alloc_granularity / (page_size * commit_factor), computed in the ctor).
  54   size_t _pages_per_region;
  55 
  56  public:
  57   G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
  58                                       size_t actual_size,
  59                                       size_t page_size,
  60                                       size_t alloc_granularity,
  61                                       size_t commit_factor,
  62                                       MemoryType type) :
  63     G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
  64     _pages_per_region(alloc_granularity / (page_size * commit_factor)) {
  65 
     // NOTE(review): _pages_per_region divides by page_size * commit_factor, but this
     // guarantee only compares against page_size; for commit_factor > 1 the check is
     // weaker than the computation above assumes -- confirm intended.
  66     guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
  67   }
  68 
     // Commit the backing pages for regions [start_idx, start_idx + num_regions),
     // mark them committed in _commit_map, and notify the listener; zero_filled
     // reports whether the storage commit returned zero-filled memory.
  69   virtual void commit_regions(uint start_idx, size_t num_regions) {
     // The (size_t) cast widens the uint region index before multiplying, avoiding
     // 32-bit overflow in the page-index computation.
  70     bool zero_filled = _storage.commit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);




  71     _commit_map.set_range(start_idx, start_idx + num_regions);
  72     fire_on_commit(start_idx, num_regions, zero_filled);
  73   }
  74 
     // Uncommit the backing pages for regions [start_idx, start_idx + num_regions)
     // and clear their bits in _commit_map. No listener notification on uncommit.
  75   virtual void uncommit_regions(uint start_idx, size_t num_regions) {
  76     _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
  77     _commit_map.clear_range(start_idx, start_idx + num_regions);
  78   }
  79 };
  80 
  81 // G1RegionToSpaceMapper implementation where the region granularity is smaller
  82 // than the commit granularity.
  83 // Basically, the contents of one OS page span several regions.
  84 class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
  85  private:
     // Per-page reference count: how many committed regions currently share each
     // commit-granularity page. Pages with no committed region default to 0.
  86   class CommitRefcountArray : public G1BiasedMappedArray<uint> {
  87    protected:
  88      virtual uint default_value() const { return 0; }
  89   };
  90 


  93   CommitRefcountArray _refcounts;
  94 
     // Map a region index to the index of the commit-granularity page containing it.
     // NOTE(review): _regions_per_page is declared on lines collapsed in this view;
     // it is initialized in the ctor below.
  95   uintptr_t region_idx_to_page_idx(uint region) const {
  96     return region / _regions_per_page;
  97   }
  98 
  99  public:
 100   G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
 101                                        size_t actual_size,
 102                                        size_t page_size,
 103                                        size_t alloc_granularity,
 104                                        size_t commit_factor,
 105                                        MemoryType type) :
 106     G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
 107     _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
 108 
 109     guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
     // Size the refcount array to whole pages; rs.size() is rounded up so the last
     // partial page gets a slot too.
 110     _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
 111   }
 112 
     // Commit regions [start_idx, start_idx + num_regions). A page is physically
     // committed only when its refcount goes 0 -> 1; otherwise only the refcount
     // and _commit_map are updated. Listeners are notified per region.
 113   virtual void commit_regions(uint start_idx, size_t num_regions) {







 114     for (uint i = start_idx; i < start_idx + num_regions; i++) {
 115       assert(!_commit_map.at(i), "Trying to commit storage at region %u that is already committed", i);
 116       size_t idx = region_idx_to_page_idx(i);
 117       uint old_refcount = _refcounts.get_by_index(idx);

     // zero_filled stays false for regions whose page was already committed:
     // such memory may contain stale data from a previous user.
 118       bool zero_filled = false;
 119       if (old_refcount == 0) {






 120         zero_filled = _storage.commit(idx, 1);
 121       }


 122       _refcounts.set_by_index(idx, old_refcount + 1);
 123       _commit_map.set_bit(i);
 124       fire_on_commit(i, 1, zero_filled);
 125     }




 126   }
 127 
     // Uncommit regions [start_idx, start_idx + num_regions). A page is physically
     // uncommitted only when its refcount drops 1 -> 0 (last region on the page).
 128   virtual void uncommit_regions(uint start_idx, size_t num_regions) {
 129     for (uint i = start_idx; i < start_idx + num_regions; i++) {
 130       assert(_commit_map.at(i), "Trying to uncommit storage at region %u that is not committed", i);
 131       size_t idx = region_idx_to_page_idx(i);
 132       uint old_refcount = _refcounts.get_by_index(idx);
 133       assert(old_refcount > 0, "must be");
 134       if (old_refcount == 1) {
 135         _storage.uncommit(idx, 1);
 136       }
 137       _refcounts.set_by_index(idx, old_refcount - 1);
 138       _commit_map.clear_bit(i);
 139     }
 140   }
 141 };
 142 
 143 void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
 144   if (_listener != NULL) {
 145     _listener->on_commit(start_idx, num_regions, zero_filled);


  49 // G1RegionToSpaceMapper implementation where the region granularity is larger than
  50 // or the same as the commit granularity.
  51 // Basically, the space corresponding to one region spans several OS pages.
  52 class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
  53  private:
     // Number of commit-granularity pages covered by a single region
     // (alloc_granularity / (page_size * commit_factor), computed in the ctor).
  54   size_t _pages_per_region;
  55 
  56  public:
  57   G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
  58                                       size_t actual_size,
  59                                       size_t page_size,
  60                                       size_t alloc_granularity,
  61                                       size_t commit_factor,
  62                                       MemoryType type) :
  63     G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
  64     _pages_per_region(alloc_granularity / (page_size * commit_factor)) {
  65 
     // NOTE(review): _pages_per_region divides by page_size * commit_factor, but this
     // guarantee only compares against page_size; for commit_factor > 1 the check is
     // weaker than the computation above assumes -- confirm intended.
  66     guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
  67   }
  68 
     // Commit the backing pages for regions [start_idx, start_idx + num_regions),
     // optionally pretouching them (possibly in parallel via pretouch_gang) when
     // AlwaysPreTouch is set, then mark them committed and notify the listener.
     // NOTE(review): presumably pretouch() tolerates a NULL pretouch_gang by
     // touching serially -- confirm against G1PageBasedVirtualSpace.
  69   virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
     // The (size_t) cast widens the uint region index before multiplying, avoiding
     // 32-bit overflow in the page-index computation.
  70     size_t const start_page = (size_t)start_idx * _pages_per_region;
  71     bool zero_filled = _storage.commit(start_page, num_regions * _pages_per_region);
  72     if (AlwaysPreTouch) {
  73       _storage.pretouch(start_page, num_regions * _pages_per_region, pretouch_gang);
  74     }
  75     _commit_map.set_range(start_idx, start_idx + num_regions);
  76     fire_on_commit(start_idx, num_regions, zero_filled);
  77   }
  78 
     // Uncommit the backing pages for regions [start_idx, start_idx + num_regions)
     // and clear their bits in _commit_map. No listener notification on uncommit.
  79   virtual void uncommit_regions(uint start_idx, size_t num_regions) {
  80     _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
  81     _commit_map.clear_range(start_idx, start_idx + num_regions);
  82   }
  83 };
  84 
  85 // G1RegionToSpaceMapper implementation where the region granularity is smaller
  86 // than the commit granularity.
  87 // Basically, the contents of one OS page span several regions.
  88 class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
  89  private:
     // Per-page reference count: how many committed regions currently share each
     // commit-granularity page. Pages with no committed region default to 0.
  90   class CommitRefcountArray : public G1BiasedMappedArray<uint> {
  91    protected:
  92      virtual uint default_value() const { return 0; }
  93   };
  94 


  97   CommitRefcountArray _refcounts;
  98 
     // Map a region index to the index of the commit-granularity page containing it.
     // NOTE(review): _regions_per_page is declared on lines collapsed in this view;
     // it is initialized in the ctor below.
  99   uintptr_t region_idx_to_page_idx(uint region) const {
 100     return region / _regions_per_page;
 101   }
 102 
 103  public:
 104   G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
 105                                        size_t actual_size,
 106                                        size_t page_size,
 107                                        size_t alloc_granularity,
 108                                        size_t commit_factor,
 109                                        MemoryType type) :
 110     G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
 111     _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
 112 
 113     guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
     // Size the refcount array to whole pages; rs.size() is rounded up so the last
     // partial page gets a slot too.
 114     _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
 115   }
 116 
     // Commit regions [start_idx, start_idx + num_regions). A page is physically
     // committed only when its refcount goes 0 -> 1. Newly committed pages are
     // tracked so they can be pretouched (possibly in parallel via pretouch_gang)
     // in one call when AlwaysPreTouch is set.
 117   virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
     // Sentinel meaning "no page committed yet in this call".
 118     size_t const NoPage = ~(size_t)0;
 119 
 120     size_t first_committed = NoPage;
 121     size_t num_committed = 0;
 122 
 123     bool all_zero_filled = true;
 124 
 125     for (uint i = start_idx; i < start_idx + num_regions; i++) {
 126       assert(!_commit_map.at(i), "Trying to commit storage at region %u that is already committed", i);
 127       size_t idx = region_idx_to_page_idx(i);
 128       uint old_refcount = _refcounts.get_by_index(idx);
 129 
 130       bool zero_filled = false;
 131       if (old_refcount == 0) {
 132         if (first_committed == NoPage) {
 133           first_committed = idx;
 134           num_committed = 1;
 135         } else {
 136           num_committed++;
 137         }
 138         zero_filled = _storage.commit(idx, 1);
 139       }
     // Conservative: any region whose page was already committed (zero_filled
     // stays false) makes the whole batch report not-zero-filled.
 140       all_zero_filled &= zero_filled;
 141 
 142       _refcounts.set_by_index(idx, old_refcount + 1);
 143       _commit_map.set_bit(i);

 144     }
     // NOTE(review): the range [first_committed, first_committed + num_committed)
     // assumes the pages committed in this call are contiguous. If an interior
     // page already had a nonzero refcount while pages on both sides did not,
     // the last newly committed page(s) would fall outside this range and not be
     // pretouched -- confirm callers cannot produce that pattern.
 145     if (AlwaysPreTouch && num_committed > 0) {
 146       _storage.pretouch(first_committed, num_committed, pretouch_gang);
 147     }
     // Single batched notification, unlike the per-region fire_on_commit in the
     // pre-pretouch version of this method.
 148     fire_on_commit(start_idx, num_regions, all_zero_filled);
 149   }
 150 
     // Uncommit regions [start_idx, start_idx + num_regions). A page is physically
     // uncommitted only when its refcount drops 1 -> 0 (last region on the page).
 151   virtual void uncommit_regions(uint start_idx, size_t num_regions) {
 152     for (uint i = start_idx; i < start_idx + num_regions; i++) {
 153       assert(_commit_map.at(i), "Trying to uncommit storage at region %u that is not committed", i);
 154       size_t idx = region_idx_to_page_idx(i);
 155       uint old_refcount = _refcounts.get_by_index(idx);
 156       assert(old_refcount > 0, "must be");
 157       if (old_refcount == 1) {
 158         _storage.uncommit(idx, 1);
 159       }
 160       _refcounts.set_by_index(idx, old_refcount - 1);
 161       _commit_map.clear_bit(i);
 162     }
 163   }
 164 };
 165 
 166 void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
 167   if (_listener != NULL) {
 168     _listener->on_commit(start_idx, num_regions, zero_filled);
< prev index next >