< prev index next >

src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp

Print this page
rev 51152 : [mq]: gcbuildoptionspatch


2156   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2157          "linear allocation block should be empty");
2158   FreeChunk* fc;
2159   if (blk->_refillSize < SmallForDictionary &&
2160       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2161     // A linAB's strategy might be to use small sizes to reduce
2162     // fragmentation but still get the benefits of allocation from a
2163     // linAB.
2164   } else {
2165     fc = getChunkFromDictionary(blk->_refillSize);
2166   }
2167   if (fc != NULL) {
2168     blk->_ptr  = (HeapWord*)fc;
2169     blk->_word_size = fc->size();
2170     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2171   }
2172 }
2173 
2174 // Support for compaction
2175 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {

     // Unconditional call in this (old) revision; the updated revision below
     // guards it with #if INCLUDE_SERIALGC so builds without Serial GC compile.
     // NOTE(review): scan_and_forward presumably computes forwarding addresses
     // for live objects ahead of compaction — confirm against its definition.
2176   scan_and_forward(this, cp);

2177   // Prepare_for_compaction() uses the space between live objects
2178   // so that later phase can skip dead space quickly.  So verification
2179   // of the free lists doesn't work after.
2180 }
2181 
2182 void CompactibleFreeListSpace::adjust_pointers() {
2183   // In other versions of adjust_pointers(), a bail out
2184   // based on the amount of live data in the generation
2185   // (i.e., if 0, bail out) may be used.
2186   // Cannot test used() == 0 here because the free lists have already
2187   // been mangled by the compaction.
2188 

     // Unconditional call in this (old) revision; the updated revision below
     // wraps it in #if INCLUDE_SERIALGC for builds that exclude Serial GC.
2189   scan_and_adjust_pointers(this);

2190   // See note about verification in prepare_for_compaction().
2191 }
2192 
2193 void CompactibleFreeListSpace::compact() {

     // Unconditional call in this (old) revision; the updated revision below
     // guards it with #if INCLUDE_SERIALGC.
2194   scan_and_compact(this);

2195 }
2196 
2197 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2198 // where fbs is free block sizes
2199 double CompactibleFreeListSpace::flsFrag() const {
2200   size_t itabFree = totalSizeInIndexedFreeLists();
2201   double frag = 0.0;
2202   size_t i;
2203 
2204   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2205     double sz  = i;
2206     frag      += _indexedFreeList[i].count() * (sz * sz);
2207   }
2208 
2209   double totFree = itabFree +
2210                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2211   if (totFree > 0) {
2212     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2213             (totFree * totFree));
2214     frag = (double)1.0  - frag;




2156   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2157          "linear allocation block should be empty");
2158   FreeChunk* fc;
2159   if (blk->_refillSize < SmallForDictionary &&
2160       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2161     // A linAB's strategy might be to use small sizes to reduce
2162     // fragmentation but still get the benefits of allocation from a
2163     // linAB.
2164   } else {
2165     fc = getChunkFromDictionary(blk->_refillSize);
2166   }
2167   if (fc != NULL) {
2168     blk->_ptr  = (HeapWord*)fc;
2169     blk->_word_size = fc->size();
2170     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2171   }
2172 }
2173 
2174 // Support for compaction
2175 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2176 #if INCLUDE_SERIALGC
     // Compiled only when Serial GC is included in the build; otherwise this
     // function is a no-op.  NOTE(review): scan_and_forward presumably lives
     // in serial mark-compact code — hence the guard; confirm.
2177   scan_and_forward(this, cp);
2178 #endif
2179   // Prepare_for_compaction() uses the space between live objects
2180   // so that later phase can skip dead space quickly.  So verification
2181   // of the free lists doesn't work after.
2182 }
2183 
2184 void CompactibleFreeListSpace::adjust_pointers() {
2185   // In other versions of adjust_pointers(), a bail out
2186   // based on the amount of live data in the generation
2187   // (i.e., if 0, bail out) may be used.
2188   // Cannot test used() == 0 here because the free lists have already
2189   // been mangled by the compaction.
2190 
     // Compiled only when Serial GC is included in the build; otherwise this
     // function is a no-op.
2191 #if INCLUDE_SERIALGC
2192   scan_and_adjust_pointers(this);
2193 #endif
2194   // See note about verification in prepare_for_compaction().
2195 }
2196 
2197 void CompactibleFreeListSpace::compact() {
2198 #if INCLUDE_SERIALGC
     // Compiled only when Serial GC is included in the build; a no-op otherwise.
2199   scan_and_compact(this);
2200 #endif
2201 }
2202 
2203 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2204 // where fbs is free block sizes
2205 double CompactibleFreeListSpace::flsFrag() const {
2206   size_t itabFree = totalSizeInIndexedFreeLists();
2207   double frag = 0.0;
2208   size_t i;
2209 
2210   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2211     double sz  = i;
2212     frag      += _indexedFreeList[i].count() * (sz * sz);
2213   }
2214 
2215   double totFree = itabFree +
2216                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2217   if (totFree > 0) {
2218     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2219             (totFree * totFree));
2220     frag = (double)1.0  - frag;


< prev index next >