src/share/vm/gc/cms/compactibleFreeListSpace.cpp (old version)

 624         // put it all in the linAB
 625         MutexLockerEx x(parDictionaryAllocLock(),
 626                         Mutex::_no_safepoint_check_flag);
 627         _smallLinearAllocBlock._ptr = prevEnd;
 628         _smallLinearAllocBlock._word_size = newFcSize;
 629         repairLinearAllocBlock(&_smallLinearAllocBlock);
 630         // Births of chunks put into a LinAB are not recorded.  Births
 631         // of chunks as they are allocated out of a LinAB are.
 632       } else {
 633         // Add the block to the free lists, if possible coalescing it
 634         // with the last free block, and update the BOT and census data.
 635         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 636       }
 637     }
 638   }
 639 }
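
Review note: the two fields assigned above are the heart of the linear allocation block, a bump-pointer region carved out for small requests. A minimal sketch of that shape (illustrative only; the real LinearAllocBlock carries additional refill bookkeeping):

    struct LinearAllocBlockSketch {
      HeapWord* _ptr;        // start of the unallocated remainder
      size_t    _word_size;  // words left; 0 means the linAB is exhausted
    };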
 640 
 641 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 642   CompactibleFreeListSpace* _cfls;
 643   CMSCollector* _collector;
 644 protected:
 645   // Override.
 646 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 647   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 648                                        HeapWord* bottom, HeapWord* top, \
 649                                        ClosureType* cl);                \
 650       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 651                                        HeapWord* bottom, HeapWord* top, \
 652                                        ClosureType* cl);                \
 653     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 654                                        HeapWord* bottom, HeapWord* top, \
 655                                        ClosureType* cl)
 656   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 657   walk_mem_region_with_cl_DECL(FilteringClosure);
 658 
 659 public:
 660   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 661                       CMSCollector* collector,
 662                       ExtendedOopClosure* cl,
 663                       CardTableModRefBS::PrecisionStyle precision,
 664                       HeapWord* boundary) :
 665     Filtering_DCTOC(sp, cl, precision, boundary),
 666     _cfls(sp), _collector(collector) {}
 667 };
 668 
 669 // We de-virtualize the block-related calls below, since we know that our
 670 // space is a CompactibleFreeListSpace.
 671 
 672 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 673 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 674                                                  HeapWord* bottom,              \
 675                                                  HeapWord* top,                 \
 676                                                  ClosureType* cl) {             \
 677    bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0;                 \
 678    if (is_par) {                                                                \
 679      assert(GenCollectedHeap::heap()->n_par_threads() ==                        \
 680             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
 681      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 682    } else {                                                                     \
 683      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 684    }                                                                            \
 685 }                                                                               \
 686 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 687                                                       HeapWord* bottom,         \
 688                                                       HeapWord* top,            \
 689                                                       ClosureType* cl) {        \
 690   /* Skip parts that are before "mr", in case "block_start" sent us             \
 691      back too far. */                                                           \
 692   HeapWord* mr_start = mr.start();                                              \
 693   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 694   HeapWord* next = bottom + bot_size;                                           \
 695   while (next < mr_start) {                                                     \
 696     bottom = next;                                                              \
 697     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 698     next = bottom + bot_size;                                                   \
 699   }                                                                             \
 700                                                                                 \


 730         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 731                     oop(bottom)) &&                                             \
 732         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 733       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 734       bottom += _cfls->adjustObjectSize(word_sz);                               \
 735     } else {                                                                    \
 736       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 737     }                                                                           \
 738   }                                                                             \
 739 }
 740 
 741 // (There are only two of these, rather than N, because the split is due
 742 // only to the introduction of the FilteringClosure, a local part of the
 743 // impl of this abstraction.)
 744 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 745 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
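
Review note: the de-virtualization mentioned above works because a call qualified with a class name is bound statically, bypassing the vtable. A self-contained sketch with hypothetical types (not the HotSpot classes):

    #include <cstddef>

    struct Space {
      virtual ~Space() {}
      virtual size_t block_size(const void* p) const { return 1; }
    };

    struct FreeListSpaceSketch : public Space {
      virtual size_t block_size(const void* p) const { return 8; }
    };

    size_t scan(FreeListSpaceSketch* s, const void* p) {
      size_t a = s->block_size(p);                       // virtual dispatch
      size_t b = s->FreeListSpaceSketch::block_size(p);  // statically bound,
                                                         // inlinable
      return a + b;
    }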
 746 
 747 DirtyCardToOopClosure*
 748 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 749                                       CardTableModRefBS::PrecisionStyle precision,
 750                                       HeapWord* boundary) {
 751   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 752 }
 753 
 754 
 755 // Note on locking for the space iteration functions:
 756 // since the collector's iteration activities are concurrent with
 757 // allocation activities by mutators, absent a suitable mutual exclusion
 758 // mechanism the iterators may go awry. For instance, a block being
 759 // iterated over may suddenly be allocated, or divided up with part of it
 760 // allocated, and so on.
 761 
 762 // Apply the given closure to each block in the space.
 763 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 764   assert_lock_strong(freelistLock());
 765   HeapWord *cur, *limit;
 766   for (cur = bottom(), limit = end(); cur < limit;
 767        cur += cl->do_blk_careful(cur));
 768 }
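
Review note: a sketch of a conforming caller for the locking note above, assuming freelistLock() is accessible at the call site; it uses the same MutexLockerEx pattern seen earlier in this file:

    void iterate_blocks_locked(CompactibleFreeListSpace* cfls,
                               BlkClosureCareful* cl) {
      // Hold the free-list lock so no mutator can allocate or split a block
      // mid-iteration; this satisfies assert_lock_strong(freelistLock()).
      MutexLockerEx ml(cfls->freelistLock(), Mutex::_no_safepoint_check_flag);
      cfls->blk_iterate_careful(cl);
    }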
 769 
 770 // Apply the given closure to each block in the space.
 771 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {


1880   size_t new_size) {
1881   assert_locked();
1882   size_t size = chunk->size();
1883   assert(size > new_size, "Split from a smaller block?");
1884   assert(is_aligned(chunk), "alignment problem");
1885   assert(size == adjustObjectSize(size), "alignment problem");
1886   size_t rem_sz = size - new_size;
1887   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1888   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1889   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1890   assert(is_aligned(ffc), "alignment problem");
1891   ffc->set_size(rem_sz);
1892   ffc->link_next(NULL);
1893   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1894   // Above must occur before BOT is updated below.
1895   // adjust block offset table
1896   OrderAccess::storestore();
1897   assert(chunk->is_free() && ffc->is_free(), "Error");
1898   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1899   if (rem_sz < SmallForDictionary) {
1900     bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
1901     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1902     assert(!is_par ||
1903            (GenCollectedHeap::heap()->n_par_threads() ==
1904             GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
1905     returnChunkToFreeList(ffc);
1906     split(size, rem_sz);
1907     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1908   } else {
1909     returnChunkToDictionary(ffc);
1910     split(size, rem_sz);
1911   }
1912   chunk->set_size(new_size);
1913   return chunk;
1914 }
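
Review note: the storestore() barrier above is a publication fence: the remainder chunk's size and free-marking must be visible before split_block() makes it findable through the BOT. The same pattern in portable C++, with std::atomic standing in for the HotSpot barrier (illustrative types):

    #include <atomic>
    #include <cstddef>

    struct ChunkSketch { size_t size; ChunkSketch* prev; ChunkSketch* next; };
    std::atomic<ChunkSketch*> bot_entry{nullptr};

    void publish_remainder(ChunkSketch* c, size_t rem_sz) {
      c->size = rem_sz;   // set size and clear links first, marking the
      c->next = nullptr;  // chunk free for other (parallel) GC threads...
      c->prev = nullptr;
      // ...then publish with release ordering, so the stores above cannot
      // be reordered after the store that makes the chunk reachable.
      bot_entry.store(c, std::memory_order_release);
    }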
1915 
1916 void
1917 CompactibleFreeListSpace::sweep_completed() {
1918   // Now that space is probably plentiful, refill linear
1919   // allocation blocks as needed.
1920   refillLinearAllocBlocksIfNeeded();
1921 }
1922 
1923 void
1924 CompactibleFreeListSpace::gc_prologue() {


1955   // Mark the "end" of the used space at the time of this call;
1956   // note, however, that promoted objects from this point
1957   // on are tracked in the _promoInfo below.
1958   set_saved_mark_word(unallocated_block());
1959 #ifdef ASSERT
1960   // Check the sanity of save_marks() etc.
1961   MemRegion ur    = used_region();
1962   MemRegion urasm = used_region_at_save_marks();
1963   assert(ur.contains(urasm),
1964          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1965                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1966                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1967 #endif
1968   // inform allocator that promotions should be tracked.
1969   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1970   _promoInfo.startTrackingPromotions();
1971 }
1972 
1973 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1974   assert(_promoInfo.tracking(), "No preceding save_marks?");
1975   assert(GenCollectedHeap::heap()->n_par_threads() == 0,
1976          "Shouldn't be called if using parallel gc.");
1977   return _promoInfo.noPromotions();
1978 }
1979 
1980 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1981                                                                             \
1982 void CompactibleFreeListSpace::                                             \
1983 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1984   assert(GenCollectedHeap::heap()->n_par_threads() == 0,                    \
1985          "Shouldn't be called (yet) during parallel part of gc.");          \
1986   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1987   /*                                                                        \
1988    * This also restores any displaced headers and removes the elements from \
1989    * the iteration set as they are processed, so that we have a clean slate \
1990    * at the end of the iteration. Note, thus, that if new objects are       \
1991    * promoted as a result of the iteration they are iterated over as well.  \
1992    */                                                                       \
1993   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1994 }
1995 
1996 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
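
Review note: the macro's block comment specifies an iterate-to-fixpoint contract: objects promoted during the walk are themselves walked, so the promotion list is empty on return. The bare shape of that pattern, with illustrative types:

    #include <deque>

    template <typename Obj, typename Closure>
    void iterate_to_fixpoint(std::deque<Obj>& promoted, Closure apply) {
      // apply() may append newly promoted objects to 'promoted'; keep
      // draining until none remain, matching the noPromotions() assert.
      while (!promoted.empty()) {
        Obj o = promoted.front();
        promoted.pop_front();
        apply(o);
      }
    }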
1997 
1998 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1999   return _smallLinearAllocBlock._word_size == 0;
2000 }
2001 
2002 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2003   // Fix up linear allocation blocks to look like free blocks
2004   repairLinearAllocBlock(&_smallLinearAllocBlock);
2005 }

src/share/vm/gc/cms/compactibleFreeListSpace.cpp (new version)

 624         // put it all in the linAB
 625         MutexLockerEx x(parDictionaryAllocLock(),
 626                         Mutex::_no_safepoint_check_flag);
 627         _smallLinearAllocBlock._ptr = prevEnd;
 628         _smallLinearAllocBlock._word_size = newFcSize;
 629         repairLinearAllocBlock(&_smallLinearAllocBlock);
 630         // Births of chunks put into a LinAB are not recorded.  Births
 631         // of chunks as they are allocated out of a LinAB are.
 632       } else {
 633         // Add the block to the free lists, if possible coalescing it
 634         // with the last free block, and update the BOT and census data.
 635         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 636       }
 637     }
 638   }
 639 }
 640 
 641 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 642   CompactibleFreeListSpace* _cfls;
 643   CMSCollector* _collector;
 644   bool _parallel;
 645 protected:
 646   // Override.
 647 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 648   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 649                                        HeapWord* bottom, HeapWord* top, \
 650                                        ClosureType* cl);                \
 651       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 652                                        HeapWord* bottom, HeapWord* top, \
 653                                        ClosureType* cl);                \
 654     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 655                                        HeapWord* bottom, HeapWord* top, \
 656                                        ClosureType* cl)
 657   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 658   walk_mem_region_with_cl_DECL(FilteringClosure);
 659 
 660 public:
 661   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 662                       CMSCollector* collector,
 663                       ExtendedOopClosure* cl,
 664                       CardTableModRefBS::PrecisionStyle precision,
 665                       HeapWord* boundary,
 666                       bool parallel) :
 667     Filtering_DCTOC(sp, cl, precision, boundary),
 668     _cfls(sp), _collector(collector), _parallel(parallel) {}
 669 };
 670 
 671 // We de-virtualize the block-related calls below, since we know that our
 672 // space is a CompactibleFreeListSpace.
 673 
 674 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 675 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 676                                                  HeapWord* bottom,              \
 677                                                  HeapWord* top,                 \
 678                                                  ClosureType* cl) {             \
 679    if (_parallel) {                                                             \
 680      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 681    } else {                                                                     \
 682      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 683    }                                                                            \
 684 }                                                                               \
 685 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 686                                                       HeapWord* bottom,         \
 687                                                       HeapWord* top,            \
 688                                                       ClosureType* cl) {        \
 689   /* Skip parts that are before "mr", in case "block_start" sent us             \
 690      back too far. */                                                           \
 691   HeapWord* mr_start = mr.start();                                              \
 692   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 693   HeapWord* next = bottom + bot_size;                                           \
 694   while (next < mr_start) {                                                     \
 695     bottom = next;                                                              \
 696     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 697     next = bottom + bot_size;                                                   \
 698   }                                                                             \
 699                                                                                 \


 729         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 730                     oop(bottom)) &&                                             \
 731         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 732       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 733       bottom += _cfls->adjustObjectSize(word_sz);                               \
 734     } else {                                                                    \
 735       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 736     }                                                                           \
 737   }                                                                             \
 738 }
 739 
 740 // (There are only two of these, rather than N, because the split is due
 741 // only to the introduction of the FilteringClosure, a local part of the
 742 // impl of this abstraction.)
 743 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 744 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 745 
 746 DirtyCardToOopClosure*
 747 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 748                                       CardTableModRefBS::PrecisionStyle precision,
 749                                       HeapWord* boundary,
 750                                       bool parallel) {
 751   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel);
 752 }
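
Review note: with the new parameter, the caller decides parallelism once and threads it through, rather than each card walk querying GenCollectedHeap::heap()->n_par_threads(). A hypothetical call site (names other than new_dcto_cl and Precise are assumptions):

    DirtyCardToOopClosure* dcto_cl =
        space->new_dcto_cl(cl, CardTableModRefBS::Precise, boundary,
                           /* parallel */ true);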
 753 
 754 
 755 // Note on locking for the space iteration functions:
 756 // since the collector's iteration activities are concurrent with
 757 // allocation activities by mutators, absent a suitable mutual exclusion
 758 // mechanism the iterators may go awry. For instance, a block being
 759 // iterated over may suddenly be allocated, or divided up with part of it
 760 // allocated, and so on.
 761 
 762 // Apply the given closure to each block in the space.
 763 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 764   assert_lock_strong(freelistLock());
 765   HeapWord *cur, *limit;
 766   for (cur = bottom(), limit = end(); cur < limit;
 767        cur += cl->do_blk_careful(cur));
 768 }
 769 
 770 // Apply the given closure to each block in the space.
 771 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {


1880   size_t new_size) {
1881   assert_locked();
1882   size_t size = chunk->size();
1883   assert(size > new_size, "Split from a smaller block?");
1884   assert(is_aligned(chunk), "alignment problem");
1885   assert(size == adjustObjectSize(size), "alignment problem");
1886   size_t rem_sz = size - new_size;
1887   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1888   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1889   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1890   assert(is_aligned(ffc), "alignment problem");
1891   ffc->set_size(rem_sz);
1892   ffc->link_next(NULL);
1893   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1894   // Above must occur before BOT is updated below.
1895   // adjust block offset table
1896   OrderAccess::storestore();
1897   assert(chunk->is_free() && ffc->is_free(), "Error");
1898   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1899   if (rem_sz < SmallForDictionary) {
1900     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1901     bool is_par = Thread::current()->is_GC_task_thread();
1902     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1903     returnChunkToFreeList(ffc);
1904     split(size, rem_sz);
1905     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1906   } else {
1907     returnChunkToDictionary(ffc);
1908     split(size, rem_sz);
1909   }
1910   chunk->set_size(new_size);
1911   return chunk;
1912 }
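
Review note: the thread now self-identifies via Thread::current()->is_GC_task_thread() instead of consulting a global count, which is what lets the assert against active_workers() go away. The manual lock()/unlock() pair could also be expressed as a scoped guard; a hypothetical sketch (not an existing HotSpot class):

    class ConditionalMutexLockerSketch {
      Mutex* _mutex;  // NULL when no locking is required
     public:
      ConditionalMutexLockerSketch(Mutex* m, bool take)
        : _mutex(take ? m : NULL) {
        if (_mutex != NULL) _mutex->lock();
      }
      ~ConditionalMutexLockerSketch() {
        if (_mutex != NULL) _mutex->unlock();
      }
    };

    // Usage: the guard's scope would cover returnChunkToFreeList(ffc)
    // and split(size, rem_sz), e.g.
    // ConditionalMutexLockerSketch cml(_indexedFreeListParLocks[rem_sz], is_par);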
1913 
1914 void
1915 CompactibleFreeListSpace::sweep_completed() {
1916   // Now that space is probably plentiful, refill linear
1917   // allocation blocks as needed.
1918   refillLinearAllocBlocksIfNeeded();
1919 }
1920 
1921 void
1922 CompactibleFreeListSpace::gc_prologue() {


1953   // Mark the "end" of the used space at the time of this call;
1954   // note, however, that promoted objects from this point
1955   // on are tracked in the _promoInfo below.
1956   set_saved_mark_word(unallocated_block());
1957 #ifdef ASSERT
1958   // Check the sanity of save_marks() etc.
1959   MemRegion ur    = used_region();
1960   MemRegion urasm = used_region_at_save_marks();
1961   assert(ur.contains(urasm),
1962          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1963                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1964                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1965 #endif
1966   // inform allocator that promotions should be tracked.
1967   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1968   _promoInfo.startTrackingPromotions();
1969 }
1970 
1971 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1972   assert(_promoInfo.tracking(), "No preceding save_marks?");
1973   return _promoInfo.noPromotions();
1974 }
1975 
1976 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1977                                                                             \
1978 void CompactibleFreeListSpace::                                             \
1979 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1980   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1981   /*                                                                        \
1982    * This also restores any displaced headers and removes the elements from \
1983    * the iteration set as they are processed, so that we have a clean slate \
1984    * at the end of the iteration. Note, thus, that if new objects are       \
1985    * promoted as a result of the iteration they are iterated over as well.  \
1986    */                                                                       \
1987   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1988 }
1989 
1990 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1991 
1992 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1993   return _smallLinearAllocBlock._word_size == 0;
1994 }
1995 
1996 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1997   // Fix up linear allocation blocks to look like free blocks
1998   repairLinearAllocBlock(&_smallLinearAllocBlock);
1999 }

