    _smallLinearAllocBlock.set(addr, fc->size(),
      1024*SmallForLinearAlloc, fc->size());
    // Note that _unallocated_block is not updated here.
    // Allocations from the linear allocation block should
    // update it.
  } else {
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                               SmallForLinearAlloc);
  }
  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
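  // Pick how free blocks are chosen to satisfy allocation requests:
  // best-fit-first when UseCMSBestFit is set, otherwise no particular
  // fit strategy.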
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  check_free_list_consistency();

  // Initialize locks for parallel case.

  if (CollectedHeap::use_parallel_gc_threads()) {
    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                              "a freelist par lock",
                                              true);
      DEBUG_ONLY(
        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
      )
    }
    _dictionary->set_par_lock(&_parDictionaryAllocLock);
  }
}

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table.  Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
         "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
         "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
         "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment that is smaller than MinChunkSize.
  if (adjusted_size + MinChunkSize > compaction_max_size &&
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
         "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
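      // The space is shrinking (set_end is never called with an unchanged
      // end, see the assert above), so there is no new chunk to add; just
      // check that the new end does not cut into the unallocated block.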
      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
             "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        if (ParallelGCThreads == 0) {
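          // Single-threaded collector: no other GC thread can touch the
          // linear allocation block, so it can be repaired without a lock.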
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        } else { // ParallelGCThreads > 0
          MutexLockerEx x(parDictionaryAllocLock(),
                          Mutex::_no_safepoint_check_flag);
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        }
        // Births of chunks put into a LinAB are not recorded. Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
}

class FreeListSpace_DCTOC : public Filtering_DCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
  }
#ifndef PRODUCT
  if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
    _indexedFreeList[size].verify_stats();
  }
#endif // PRODUCT
}

// Add chunk to end of last block -- if it's the largest
// block -- and update BOT and census data. We would
// of course have preferred to coalesce it with the
// last block, but it's currently less expensive to find the
// largest block than it is to find the last.
void
CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  HeapWord* chunk, size_t size) {
  // check that the chunk does lie in this space!
  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  // One of the parallel gc task threads may be here
  // whilst others are allocating.
  Mutex* lock = NULL;
  if (ParallelGCThreads != 0) {
    lock = &_parDictionaryAllocLock;
  }
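  // In the serial case 'lock' remains NULL; MutexLockerEx accepts a NULL
  // lock and simply skips locking, so the regions below run unsynchronized.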
  FreeChunk* ec;
  {
    MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
    ec = dictionary()->find_largest_dict();  // get largest block
    if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
      // It's a coterminal block - we can coalesce.
      size_t old_size = ec->size();
      coalDeath(old_size);
      removeChunkFromDictionary(ec);
      size += old_size;
    } else {
      ec = (FreeChunk*)chunk;
    }
  }
  ec->set_size(size);
  debug_only(ec->mangleFreed(size));
  if (size < SmallForDictionary && ParallelGCThreads != 0) {
    lock = _indexedFreeListParLocks[size];
  }
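  // Small chunks live on the per-size indexed free lists, so the insertion
  // below must be protected by that list's lock rather than the dictionary's.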
  MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  // Record the birth under the lock, since the recording involves
  // manipulation of the list on which the chunk lives; if the chunk
  // is allocated and is the last one on the list, the list can go away.
  coalBirth(size);
}

void
CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
                                              size_t size) {
  // check that the chunk does lie in this space!
  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  assert_locked();
  _bt.verify_single_block(chunk, size);

  FreeChunk* fc = (FreeChunk*) chunk;
    _smallLinearAllocBlock.set(addr, fc->size(),
      1024*SmallForLinearAlloc, fc->size());
    // Note that _unallocated_block is not updated here.
    // Allocations from the linear allocation block should
    // update it.
  } else {
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                               SmallForLinearAlloc);
  }
  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  check_free_list_consistency();

  // Initialize locks for parallel case.
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                            "a freelist par lock",
                                            true);
    DEBUG_ONLY(
      _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
    )
  }
  _dictionary->set_par_lock(&_parDictionaryAllocLock);
}

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table.  Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
         "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
         "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
         "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment that is smaller than MinChunkSize.
  if (adjusted_size + MinChunkSize > compaction_max_size &&
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
         "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
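      // The space is shrinking (set_end is never called with an unchanged
      // end, see the assert above), so there is no new chunk to add; just
      // check that the new end does not cut into the unallocated block.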
      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
             "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        MutexLockerEx x(parDictionaryAllocLock(),
                        Mutex::_no_safepoint_check_flag);
        _smallLinearAllocBlock._ptr = prevEnd;
        _smallLinearAllocBlock._word_size = newFcSize;
        repairLinearAllocBlock(&_smallLinearAllocBlock);
        // Births of chunks put into a LinAB are not recorded. Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
}

class FreeListSpace_DCTOC : public Filtering_DCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
  }
#ifndef PRODUCT
  if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
    _indexedFreeList[size].verify_stats();
  }
#endif // PRODUCT
}

// Add chunk to end of last block -- if it's the largest
// block -- and update BOT and census data. We would
// of course have preferred to coalesce it with the
// last block, but it's currently less expensive to find the
// largest block than it is to find the last.
void
CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  HeapWord* chunk, size_t size) {
  // check that the chunk does lie in this space!
  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  // One of the parallel gc task threads may be here
  // whilst others are allocating.
  Mutex* lock = &_parDictionaryAllocLock;
  FreeChunk* ec;
  {
    MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
    ec = dictionary()->find_largest_dict();  // get largest block
    if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
      // It's a coterminal block - we can coalesce.
      size_t old_size = ec->size();
      coalDeath(old_size);
      removeChunkFromDictionary(ec);
      size += old_size;
    } else {
      ec = (FreeChunk*)chunk;
    }
  }
  ec->set_size(size);
  debug_only(ec->mangleFreed(size));
  if (size < SmallForDictionary) {
    lock = _indexedFreeListParLocks[size];
  }
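  // Small chunks live on the per-size indexed free lists, so the insertion
  // below must be protected by that list's lock rather than the dictionary's.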
  MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  // Record the birth under the lock, since the recording involves
  // manipulation of the list on which the chunk lives; if the chunk
  // is allocated and is the last one on the list, the list can go away.
  coalBirth(size);
}

void
CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
                                              size_t size) {
  // check that the chunk does lie in this space!
  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  assert_locked();
  _bt.verify_single_block(chunk, size);

  FreeChunk* fc = (FreeChunk*) chunk;