src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

--- old
 136     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 137                                SmallForLinearAlloc);
 138   }
 139   // CMSIndexedFreeListReplenish should be at least 1
 140   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 141   _promoInfo.setSpace(this);
 142   if (UseCMSBestFit) {
 143     _fitStrategy = FreeBlockBestFitFirst;
 144   } else {
 145     _fitStrategy = FreeBlockStrategyNone;
 146   }
 147   check_free_list_consistency();
 148 
 149   // Initialize locks for parallel case.
 150 
 151   if (CollectedHeap::use_parallel_gc_threads()) {
 152     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 153       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 154                                               "a freelist par lock",
 155                                               true);
 156       if (_indexedFreeListParLocks[i] == NULL)
 157         vm_exit_during_initialization("Could not allocate a par lock");
 158       DEBUG_ONLY(
 159         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 160       )
 161     }
 162     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 163   }
 164 }
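
The loop above is a lock-striping pattern: one "par lock" per indexed free list, so parallel GC threads contend only when they touch the same size class. A minimal sketch of the idea, with hypothetical names (kIndexSetSize, free_lists_) standing in for IndexSetSize and the indexed free list array:

    #include <cassert>
    #include <cstddef>
    #include <mutex>
    #include <vector>

    class StripedFreeLists {
      static const std::size_t kIndexSetSize = 257;   // assumption: mirrors IndexSetSize
      std::mutex locks_[kIndexSetSize];               // one "par lock" per size class
      std::vector<void*> free_lists_[kIndexSetSize];  // stand-in for the FreeList array

    public:
      void add_chunk(std::size_t word_size, void* chunk) {
        assert(word_size < kIndexSetSize);
        // Lock only the list for this size class, not the whole space.
        std::lock_guard<std::mutex> guard(locks_[word_size]);
        free_lists_[word_size].push_back(chunk);
      }
    };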
 165 
 166 // Like CompactibleSpace forward() but always calls cross_threshold() to
 167 // update the block offset table.  Removed initialize_threshold call because
 168 // CFLS does not use a block offset array for contiguous spaces.
 169 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 170                                     CompactPoint* cp, HeapWord* compact_top) {
 171   // q is alive
 172   // First check if we should switch compaction space
 173   assert(this == cp->space, "'this' should be current compaction space.");
 174   size_t compaction_max_size = pointer_delta(end(), compact_top);
 175   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 176     "virtual adjustObjectSize_v() method is not correct");
 177   size_t adjusted_size = adjustObjectSize(size);
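
For reference, compaction_max_size above is the room remaining in this space measured in heap words: pointer_delta(end(), compact_top) divides the byte distance between the two pointers by the word size. A minimal sketch of that utility, consistent with its use here (the HeapWord stand-in is illustrative):

    #include <cstddef>
    #include <cstdint>

    struct HeapWord { char* i; };   // stand-in: one machine word per HeapWord

    // Distance between two pointers, in units of element_size bytes.
    inline std::size_t pointer_delta(const void* left, const void* right,
                                     std::size_t element_size) {
      return (((uintptr_t) left) - ((uintptr_t) right)) / element_size;
    }

    // Overload for HeapWord*: distance in heap words, as in
    // pointer_delta(end(), compact_top) above.
    inline std::size_t pointer_delta(const HeapWord* left, const HeapWord* right) {
      return pointer_delta(left, right, sizeof(HeapWord));
    }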


1745   Mutex* lock = NULL;
1746   if (ParallelGCThreads != 0) {
1747     lock = &_parDictionaryAllocLock;
1748   }
1749   FreeChunk* ec;
1750   {
1751     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1752     ec = dictionary()->find_largest_dict();  // get largest block
1753     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1754       // It's a coterminal block - we can coalesce.
1755       size_t old_size = ec->size();
1756       coalDeath(old_size);
1757       removeChunkFromDictionary(ec);
1758       size += old_size;
1759     } else {
1760       ec = (FreeChunk*)chunk;
1761     }
1762   }
1763   ec->set_size(size);
1764   debug_only(ec->mangleFreed(size));
1765   if (size < SmallForDictionary) {
1766     lock = _indexedFreeListParLocks[size];
1767   }
1768   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1769   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1770   // record the birth under the lock since the recording involves
1771   // manipulation of the list on which the chunk lives and
1772   // if the chunk is allocated and is the last on the list,
1773   // the list can go away.
1774   coalBirth(size);
1775 }
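
The block above performs coterminal coalescing: if the largest free block in the dictionary ends exactly at the address of the chunk being returned, the two are merged into one larger block before reinsertion, limiting fragmentation at the high end of the space. A hypothetical minimal model of that adjacency test:

    #include <cstddef>

    struct Block { char* start; std::size_t size_bytes; };

    // Returns true and grows 'prev' in place when 'prev' is coterminal
    // with the chunk at 'chunk_start' (prev ends where the chunk begins).
    bool try_coalesce(Block* prev, char* chunk_start, std::size_t chunk_bytes) {
      if (prev != nullptr && prev->start + prev->size_bytes == chunk_start) {
        prev->size_bytes += chunk_bytes;   // one contiguous free block
        return true;
      }
      return false;                        // not adjacent; keep the chunks separate
    }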
1776 
1777 void
1778 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1779                                               size_t     size) {
1780   // check that the chunk does lie in this space!
1781   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1782   assert_locked();
1783   _bt.verify_single_block(chunk, size);
1784 
1785   FreeChunk* fc = (FreeChunk*) chunk;


+++ new

 136     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 137                                SmallForLinearAlloc);
 138   }
 139   // CMSIndexedFreeListReplenish should be at least 1
 140   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 141   _promoInfo.setSpace(this);
 142   if (UseCMSBestFit) {
 143     _fitStrategy = FreeBlockBestFitFirst;
 144   } else {
 145     _fitStrategy = FreeBlockStrategyNone;
 146   }
 147   check_free_list_consistency();
 148 
 149   // Initialize locks for parallel case.
 150 
 151   if (CollectedHeap::use_parallel_gc_threads()) {
 152     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 153       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 154                                               "a freelist par lock",
 155                                               true);


 156       DEBUG_ONLY(
 157         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 158       )
 159     }
 160     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 161   }
 162 }
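
Note the first change in this version: the NULL check and vm_exit_during_initialization() call that followed new Mutex(...) in the old version (old lines 156-157) are gone. Presumably this is safe because HotSpot's CHeapObj-style operator new aborts the VM on allocation failure rather than returning NULL, which made the check dead code. A minimal sketch of that exit-on-failure allocation policy, assuming that behavior:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    struct CHeapLike {
      void* operator new(std::size_t size) {
        void* p = std::malloc(size);
        if (p == nullptr) {
          // Analogous to vm_exit_during_initialization(): never return NULL,
          // so callers need no failure check.
          std::fprintf(stderr, "out of native memory during initialization\n");
          std::exit(1);
        }
        return p;
      }
      void operator delete(void* p) { std::free(p); }
    };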
 163 
 164 // Like CompactibleSpace forward() but always calls cross_threshold() to
 165 // update the block offset table.  Removed initialize_threshold call because
 166 // CFLS does not use a block offset array for contiguous spaces.
 167 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 168                                     CompactPoint* cp, HeapWord* compact_top) {
 169   // q is alive
 170   // First check if we should switch compaction space
 171   assert(this == cp->space, "'this' should be current compaction space.");
 172   size_t compaction_max_size = pointer_delta(end(), compact_top);
 173   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 174     "virtual adjustObjectSize_v() method is not correct");
 175   size_t adjusted_size = adjustObjectSize(size);


1743   Mutex* lock = NULL;
1744   if (ParallelGCThreads != 0) {
1745     lock = &_parDictionaryAllocLock;
1746   }
1747   FreeChunk* ec;
1748   {
1749     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1750     ec = dictionary()->find_largest_dict();  // get largest block
1751     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1752       // It's a coterminal block - we can coalesce.
1753       size_t old_size = ec->size();
1754       coalDeath(old_size);
1755       removeChunkFromDictionary(ec);
1756       size += old_size;
1757     } else {
1758       ec = (FreeChunk*)chunk;
1759     }
1760   }
1761   ec->set_size(size);
1762   debug_only(ec->mangleFreed(size));
1763   if (size < SmallForDictionary && ParallelGCThreads != 0) {
1764     lock = _indexedFreeListParLocks[size];
1765   }
1766   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1767   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1768   // record the birth under the lock since the recording involves
1769   // manipulation of the list on which the chunk lives and
1770   // if the chunk is allocated and is the last on the list,
1771   // the list can go away.
1772   coalBirth(size);
1773 }
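
The second change is the strengthened guard at new line 1763: the old code consulted _indexedFreeListParLocks[size] whenever size < SmallForDictionary, but those locks are created only when CollectedHeap::use_parallel_gc_threads() holds (see the constructor above). With the added ParallelGCThreads != 0 test, the serial case never reads an entry that was never initialized; lock stays NULL, and MutexLockerEx treats a NULL mutex as a no-op, so the serial path simply skips locking, just as the earlier MutexLockerEx in this function already does. A minimal sketch of that null-tolerant RAII idiom (names hypothetical):

    #include <mutex>

    // MutexLockerEx-like guard: a NULL lock means "single-threaded,
    // skip locking entirely"; otherwise lock for the enclosing scope.
    class MaybeLocker {
      std::mutex* _m;
    public:
      explicit MaybeLocker(std::mutex* m) : _m(m) {
        if (_m != nullptr) _m->lock();    // parallel case only
      }
      ~MaybeLocker() {
        if (_m != nullptr) _m->unlock();
      }
      MaybeLocker(const MaybeLocker&) = delete;
      MaybeLocker& operator=(const MaybeLocker&) = delete;
    };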
1774 
1775 void
1776 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1777                                               size_t     size) {
1778   // check that the chunk does lie in this space!
1779   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1780   assert_locked();
1781   _bt.verify_single_block(chunk, size);
1782 
1783   FreeChunk* fc = (FreeChunk*) chunk;

