src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp


 656   walk_mem_region_with_cl_DECL(FilteringClosure);
 657 
 658 public:
 659   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 660                       CMSCollector* collector,
 661                       ExtendedOopClosure* cl,
 662                       CardTableModRefBS::PrecisionStyle precision,
 663                       HeapWord* boundary) :
 664     Filtering_DCTOC(sp, cl, precision, boundary),
 665     _cfls(sp), _collector(collector) {}
 666 };
 667 
 668 // We de-virtualize the block-related calls below, since we know that our
 669 // space is a CompactibleFreeListSpace.
 670 
 671 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 672 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 673                                                  HeapWord* bottom,              \
 674                                                  HeapWord* top,                 \
 675                                                  ClosureType* cl) {             \
 676    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 677    if (is_par) {                                                                \
 678      assert(SharedHeap::heap()->n_par_threads() ==                              \
 679             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 680      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 681    } else {                                                                     \
 682      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 683    }                                                                            \
 684 }                                                                               \
 685 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 686                                                       HeapWord* bottom,         \
 687                                                       HeapWord* top,            \
 688                                                       ClosureType* cl) {        \
 689   /* Skip parts that are before "mr", in case "block_start" sent us             \
 690      back too far. */                                                           \
 691   HeapWord* mr_start = mr.start();                                              \
 692   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 693   HeapWord* next = bottom + bot_size;                                           \
 694   while (next < mr_start) {                                                     \
 695     bottom = next;                                                              \
 696     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 697     next = bottom + bot_size;                                                   \
 698   }                                                                             \
 699                                                                                 \


1890   size_t new_size) {
1891   assert_locked();
1892   size_t size = chunk->size();
1893   assert(size > new_size, "Split from a smaller block?");
1894   assert(is_aligned(chunk), "alignment problem");
1895   assert(size == adjustObjectSize(size), "alignment problem");
1896   size_t rem_sz = size - new_size;
1897   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1898   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1899   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1900   assert(is_aligned(ffc), "alignment problem");
1901   ffc->set_size(rem_sz);
1902   ffc->link_next(NULL);
1903   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1904   // Above must occur before BOT is updated below.
1905   // adjust block offset table
1906   OrderAccess::storestore();
1907   assert(chunk->is_free() && ffc->is_free(), "Error");
1908   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1909   if (rem_sz < SmallForDictionary) {
1910     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1911     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1912     assert(!is_par ||
1913            (SharedHeap::heap()->n_par_threads() ==
1914             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1915     returnChunkToFreeList(ffc);
1916     split(size, rem_sz);
1917     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1918   } else {
1919     returnChunkToDictionary(ffc);
1920     split(size, rem_sz);
1921   }
1922   chunk->set_size(new_size);
1923   return chunk;
1924 }
1925 
1926 void
1927 CompactibleFreeListSpace::sweep_completed() {
1928   // Now that space is probably plentiful, refill linear
1929   // allocation blocks as needed.
1930   refillLinearAllocBlocksIfNeeded();
1931 }
1932 
1933 void
1934 CompactibleFreeListSpace::gc_prologue() {


1965   // Mark the "end" of the used space at the time of this call;
1966   // note, however, that promoted objects from this point
1967   // on are tracked in the _promoInfo below.
1968   set_saved_mark_word(unallocated_block());
1969 #ifdef ASSERT
1970   // Check the sanity of save_marks() etc.
1971   MemRegion ur    = used_region();
1972   MemRegion urasm = used_region_at_save_marks();
1973   assert(ur.contains(urasm),
1974          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1975                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1976                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1977 #endif
1978   // inform allocator that promotions should be tracked.
1979   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1980   _promoInfo.startTrackingPromotions();
1981 }
1982 
1983 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1984   assert(_promoInfo.tracking(), "No preceding save_marks?");
1985   assert(SharedHeap::heap()->n_par_threads() == 0,
1986          "Shouldn't be called if using parallel gc.");
1987   return _promoInfo.noPromotions();
1988 }
1989 
1990 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1991                                                                             \
1992 void CompactibleFreeListSpace::                                             \
1993 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1994   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
1995          "Shouldn't be called (yet) during parallel part of gc.");          \
1996   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1997   /*                                                                        \
1998    * This also restores any displaced headers and removes the elements from \
1999    * the iteration set as they are processed, so that we have a clean slate \
2000    * at the end of the iteration. Note, thus, that if new objects are       \
2001    * promoted as a result of the iteration they are iterated over as well.  \
2002    */                                                                       \
2003   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2004 }
2005 
2006 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2007 
2008 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2009   return _smallLinearAllocBlock._word_size == 0;
2010 }
2011 
2012 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2013   // Fix up linear allocation blocks to look like free blocks
2014   repairLinearAllocBlock(&_smallLinearAllocBlock);




 656   walk_mem_region_with_cl_DECL(FilteringClosure);
 657 
 658 public:
 659   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 660                       CMSCollector* collector,
 661                       ExtendedOopClosure* cl,
 662                       CardTableModRefBS::PrecisionStyle precision,
 663                       HeapWord* boundary) :
 664     Filtering_DCTOC(sp, cl, precision, boundary),
 665     _cfls(sp), _collector(collector) {}
 666 };
 667 
 668 // We de-virtualize the block-related calls below, since we know that our
 669 // space is a CompactibleFreeListSpace.
 670 
 671 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 672 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 673                                                  HeapWord* bottom,              \
 674                                                  HeapWord* top,                 \
 675                                                  ClosureType* cl) {             \
 676    bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0;                 \
 677    if (is_par) {                                                                \
 678      assert(GenCollectedHeap::heap()->n_par_threads() ==                        \
 679             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
 680      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 681    } else {                                                                     \
 682      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 683    }                                                                            \
 684 }                                                                               \
 685 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 686                                                       HeapWord* bottom,         \
 687                                                       HeapWord* top,            \
 688                                                       ClosureType* cl) {        \
 689   /* Skip parts that are before "mr", in case "block_start" sent us             \
 690      back too far. */                                                           \
 691   HeapWord* mr_start = mr.start();                                              \
 692   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 693   HeapWord* next = bottom + bot_size;                                           \
 694   while (next < mr_start) {                                                     \
 695     bottom = next;                                                              \
 696     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 697     next = bottom + bot_size;                                                   \
 698   }                                                                             \
 699                                                                                 \
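
The explicitly qualified calls in the macro above, such as _cfls->CompactibleFreeListSpace::block_size(bottom), are what the "de-virtualize" comment refers to: naming the class in the call binds it statically, so there is no vtable lookup and the compiler is free to inline it. A minimal standalone sketch of the idiom (types and return values invented for illustration, not HotSpot code):

    #include <cstddef>

    struct Space {
      virtual size_t block_size(const void* p) const { return 1; }  // dispatched through the vtable
      virtual ~Space() {}
    };

    struct CompactibleSpace : public Space {
      size_t block_size(const void* p) const { return 2; }          // the override we know is the real one
    };

    size_t walk(CompactibleSpace* sp, const void* p) {
      size_t a = sp->block_size(p);                    // plain call: virtual dispatch
      size_t b = sp->CompactibleSpace::block_size(p);  // qualified call: statically bound, inlinable
      return a + b;
    }

The trade-off is that the qualified call is only correct because the space is known to really be a CompactibleFreeListSpace; an override in a further subclass would be silently bypassed.
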


1890   size_t new_size) {
1891   assert_locked();
1892   size_t size = chunk->size();
1893   assert(size > new_size, "Split from a smaller block?");
1894   assert(is_aligned(chunk), "alignment problem");
1895   assert(size == adjustObjectSize(size), "alignment problem");
1896   size_t rem_sz = size - new_size;
1897   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1898   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1899   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1900   assert(is_aligned(ffc), "alignment problem");
1901   ffc->set_size(rem_sz);
1902   ffc->link_next(NULL);
1903   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1904   // Above must occur before BOT is updated below.
1905   // adjust block offset table
1906   OrderAccess::storestore();
1907   assert(chunk->is_free() && ffc->is_free(), "Error");
1908   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1909   if (rem_sz < SmallForDictionary) {
1910     bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
1911     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1912     assert(!is_par ||
1913            (GenCollectedHeap::heap()->n_par_threads() ==
1914             GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
1915     returnChunkToFreeList(ffc);
1916     split(size, rem_sz);
1917     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1918   } else {
1919     returnChunkToDictionary(ffc);
1920     split(size, rem_sz);
1921   }
1922   chunk->set_size(new_size);
1923   return chunk;
1924 }
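
The OrderAccess::storestore() call above is the ordering the preceding comment demands: the remainder chunk's size and NULL prev link (which mark it as free) must become visible to other GC threads before the block offset table is updated to point at it; otherwise a concurrent thread could reach the chunk through the BOT and read a stale header. A rough standard-C++ analogue of that publish pattern, with invented names standing in for the chunk and the BOT entry:

    #include <atomic>
    #include <cstddef>

    struct Chunk { size_t size; Chunk* prev; Chunk* next; };

    std::atomic<Chunk*> published_block;   // stands in for the BOT entry other threads consult

    void carve_and_publish(Chunk* remainder, size_t rem_sz) {
      remainder->size = rem_sz;            // initialize the new free chunk's header...
      remainder->next = nullptr;
      remainder->prev = nullptr;           // ...including the "this block is free" marker
      std::atomic_thread_fence(std::memory_order_release);           // analogue of OrderAccess::storestore()
      published_block.store(remainder, std::memory_order_relaxed);   // only now let other threads find it
    }

On the reading side a matching acquire ordering would be needed before dereferencing the published pointer; the sketch only shows the writer half that the comment in the source is about.
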
1925 
1926 void
1927 CompactibleFreeListSpace::sweep_completed() {
1928   // Now that space is probably plentiful, refill linear
1929   // allocation blocks as needed.
1930   refillLinearAllocBlocksIfNeeded();
1931 }
1932 
1933 void
1934 CompactibleFreeListSpace::gc_prologue() {


1965   // Mark the "end" of the used space at the time of this call;
1966   // note, however, that promoted objects from this point
1967   // on are tracked in the _promoInfo below.
1968   set_saved_mark_word(unallocated_block());
1969 #ifdef ASSERT
1970   // Check the sanity of save_marks() etc.
1971   MemRegion ur    = used_region();
1972   MemRegion urasm = used_region_at_save_marks();
1973   assert(ur.contains(urasm),
1974          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1975                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1976                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1977 #endif
1978   // inform allocator that promotions should be tracked.
1979   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1980   _promoInfo.startTrackingPromotions();
1981 }
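
The ASSERT block above checks the basic save_marks() invariant: the region recorded at the save point must lie inside the region currently in use. In a simple contiguous space the same idea reduces to remembering a watermark pointer; CMS instead records unallocated_block() and tracks later promotions in _promoInfo, but the containment being asserted is the same. A toy sketch of the contiguous-space version (names invented for illustration):

    // Illustrative only: a space that remembers where its used region ended
    // at the last save_marks(), so later allocations are "since save marks".
    struct MarkedSpace {
      char* bottom;
      char* top;         // current end of used space
      char* saved_mark;  // end of used space at the last save point

      void save_marks()                   { saved_mark = top; }
      bool saved_region_contained() const { return bottom <= saved_mark && saved_mark <= top; }
      bool no_allocs_since_save() const   { return saved_mark == top; }
    };
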
1982 
1983 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1984   assert(_promoInfo.tracking(), "No preceding save_marks?");
1985   assert(GenCollectedHeap::heap()->n_par_threads() == 0,
1986          "Shouldn't be called if using parallel gc.");
1987   return _promoInfo.noPromotions();
1988 }
1989 
1990 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1991                                                                             \
1992 void CompactibleFreeListSpace::                                             \
1993 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1994   assert(GenCollectedHeap::heap()->n_par_threads() == 0,                    \
1995          "Shouldn't be called (yet) during parallel part of gc.");          \
1996   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1997   /*                                                                        \
1998    * This also restores any displaced headers and removes the elements from \
1999    * the iteration set as they are processed, so that we have a clean slate \
2000    * at the end of the iteration. Note, thus, that if new objects are       \
2001    * promoted as a result of the iteration they are iterated over as well.  \
2002    */                                                                       \
2003   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2004 }
2005 
2006 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
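
The comment inside the macro describes a work-list drain: promoted_oops_iterate removes entries as it applies the closure, and objects promoted while the closure runs are appended and visited in the same pass, which is why the set can be asserted empty afterwards. A generic sketch of that control structure (not the PromotionInfo API):

    #include <deque>
    #include <functional>

    // Generic work-list drain: items added while processing are also processed,
    // and the list is empty when the loop exits.
    template <typename T>
    void drain(std::deque<T>& worklist,
               const std::function<void(T&, std::deque<T>&)>& visit) {
      while (!worklist.empty()) {
        T item = worklist.front();
        worklist.pop_front();
        visit(item, worklist);   // the visitor may push new items (e.g. newly promoted objects)
      }
    }
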
2007 
2008 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2009   return _smallLinearAllocBlock._word_size == 0;
2010 }
2011 
2012 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2013   // Fix up linear allocation blocks to look like free blocks
2014   repairLinearAllocBlock(&_smallLinearAllocBlock);

