2078 // fragmentation but still get the benefits of allocation from a
2079 // linAB.
2080 } else {
2081 fc = getChunkFromDictionary(blk->_refillSize);
2082 }
2083 if (fc != NULL) {
2084 blk->_ptr = (HeapWord*)fc;
2085 blk->_word_size = fc->size();
2086 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
2087 }
2088 }
2089
2090 // Support for concurrent collection policy decisions.
// Heuristic: vote for a concurrent collection only when this space is NOT
// using adaptive free lists AND a linear-allocation (linAB) request would
// fail. Note the short-circuit: linearAllocationWouldFail() is never
// queried while adaptive_freelists() returns true.
2091 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2092 // In the future we might want to add in fragmentation stats --
2093 // including erosion of the "mountain" into this decision as well.
2094 return !adaptive_freelists() && linearAllocationWouldFail();
2095 }
2096
2097 // Support for compaction
2098
// Computes forwarding addresses for live objects into the compact point 'cp'.
// Delegates to the SCAN_AND_FORWARD macro, parameterized with this space's
// end, liveness predicate (block_is_obj) and size query (block_size).
// NOTE(review): the macro body is defined elsewhere — presumably the shared
// forwarding scan used by all compactible spaces; confirm its contract there.
2099 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2100 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2101 // Prepare_for_compaction() uses the space between live objects
2102 // so that later phase can skip dead space quickly. So verification
2103 // of the free lists doesn't work after.
2104 }
2105
// Size adapters handed to the scan macros below:
// obj_size(q)        -- size of the (live) object at address q, adjusted
//                       by adjustObjectSize (e.g. to the space's minimum
//                       block size -- TODO confirm against its definition).
// adjust_obj_size(s) -- same adjustment applied to an already-known size s.
2106 #define obj_size(q) adjustObjectSize(oop(q)->size())
2107 #define adjust_obj_size(s) adjustObjectSize(s)
2108
// Updates all interior pointers in live objects to their forwarded
// locations, via the SCAN_AND_ADJUST_POINTERS macro using the
// adjust_obj_size adapter defined above.
2109 void CompactibleFreeListSpace::adjust_pointers() {
2110 // In other versions of adjust_pointers(), a bail out
2111 // based on the amount of live data in the generation
2112 // (i.e., if 0, bail out) may be used.
2113 // Cannot test used() == 0 here because the free lists have already
2114 // been mangled by the compaction.
2115
2116 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2117 // See note about verification in prepare_for_compaction().
2118 }
2119
// Moves live objects to their forwarded locations. Delegates to the
// SCAN_AND_COMPACT macro, using the obj_size adapter to step over blocks.
2120 void CompactibleFreeListSpace::compact() {
2121 SCAN_AND_COMPACT(obj_size);
2122 }
2123
2124 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2125 // where fbs is free block sizes
2126 double CompactibleFreeListSpace::flsFrag() const {
2127 size_t itabFree = totalSizeInIndexedFreeLists();
2128 double frag = 0.0;
2129 size_t i;
2130
2131 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2132 double sz = i;
2133 frag += _indexedFreeList[i].count() * (sz * sz);
2134 }
2135
2136 double totFree = itabFree +
2137 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2138 if (totFree > 0) {
2139 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2140 (totFree * totFree));
2141 frag = (double)1.0 - frag;
|
2078 // fragmentation but still get the benefits of allocation from a
2079 // linAB.
2080 } else {
2081 fc = getChunkFromDictionary(blk->_refillSize);
2082 }
2083 if (fc != NULL) {
2084 blk->_ptr = (HeapWord*)fc;
2085 blk->_word_size = fc->size();
2086 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
2087 }
2088 }
2089
2090 // Support for concurrent collection policy decisions.
// Heuristic: vote for a concurrent collection only when this space is NOT
// using adaptive free lists AND a linear-allocation (linAB) request would
// fail. Note the short-circuit: linearAllocationWouldFail() is never
// queried while adaptive_freelists() returns true.
2091 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2092 // In the future we might want to add in fragmentation stats --
2093 // including erosion of the "mountain" into this decision as well.
2094 return !adaptive_freelists() && linearAllocationWouldFail();
2095 }
2096
2097 // Support for compaction
// Computes forwarding addresses for live objects into the compact point 'cp'.
// Delegates to the scan_and_forward template helper, passing this space so
// the helper can use its block layout queries.
// NOTE(review): scan_and_forward is defined elsewhere — presumably the shared
// forwarding scan used by all compactible spaces; confirm its contract there.
2098 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2099 scan_and_forward(this, cp);
2100 // Prepare_for_compaction() uses the space between live objects
2101 // so that later phase can skip dead space quickly. So verification
2102 // of the free lists doesn't work after.
2103 }
2104
// Updates all interior pointers in live objects to their forwarded
// locations, via the scan_and_adjust_pointers helper (defined elsewhere).
2105 void CompactibleFreeListSpace::adjust_pointers() {
2106 // In other versions of adjust_pointers(), a bail out
2107 // based on the amount of live data in the generation
2108 // (i.e., if 0, bail out) may be used.
2109 // Cannot test used() == 0 here because the free lists have already
2110 // been mangled by the compaction.
2111
2112 scan_and_adjust_pointers(this);
2113 // See note about verification in prepare_for_compaction().
2114 }
2115
// Moves live objects to their forwarded locations, via the
// scan_and_compact helper (defined elsewhere).
2116 void CompactibleFreeListSpace::compact() {
2117 scan_and_compact(this);
2118 }
2119
2120 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2121 // where fbs is free block sizes
2122 double CompactibleFreeListSpace::flsFrag() const {
2123 size_t itabFree = totalSizeInIndexedFreeLists();
2124 double frag = 0.0;
2125 size_t i;
2126
2127 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2128 double sz = i;
2129 frag += _indexedFreeList[i].count() * (sz * sz);
2130 }
2131
2132 double totFree = itabFree +
2133 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2134 if (totFree > 0) {
2135 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2136 (totFree * totFree));
2137 frag = (double)1.0 - frag;
|