src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

rev 6796 : [mq]: templateOopIterate
rev 6799 : [mq]: latestChanges


  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"

  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"

  35 #include "memory/resourceArea.hpp"
  36 #include "memory/space.inline.hpp"
  37 #include "memory/universe.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "runtime/globals.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/java.hpp"
  43 #include "runtime/orderAccess.inline.hpp"
  44 #include "runtime/vmThread.hpp"
  45 #include "utilities/copy.hpp"
  46 
  47 /////////////////////////////////////////////////////////////////////////
  48 //// CompactibleFreeListSpace
  49 /////////////////////////////////////////////////////////////////////////
  50 
  51 // highest ranked  free list lock rank
  52 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  53 
  54 // Defaults are 0 so things will break badly if incorrectly initialized.


 658                                        ClosureType* cl);                \
 659     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 660                                        HeapWord* bottom, HeapWord* top, \
 661                                        ClosureType* cl)
 662   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 663   walk_mem_region_with_cl_DECL(FilteringClosure);
 664 
 665 public:
 666   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 667                       CMSCollector* collector,
 668                       ExtendedOopClosure* cl,
 669                       CardTableModRefBS::PrecisionStyle precision,
 670                       HeapWord* boundary) :
 671     Filtering_DCTOC(sp, cl, precision, boundary),
 672     _cfls(sp), _collector(collector) {}
 673 };
 674 
 675 // We de-virtualize the block-related calls below, since we know that our
 676 // space is a CompactibleFreeListSpace.
 677 
 678 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 679 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 680                                                  HeapWord* bottom,              \
 681                                                  HeapWord* top,                 \
 682                                                  ClosureType* cl) {             \
 683    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 684    if (is_par) {                                                                \
 685      assert(SharedHeap::heap()->n_par_threads() ==                              \
 686             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 687      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 688    } else {                                                                     \
 689      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 690    }                                                                            \
 691 }                                                                               \
 692 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 693                                                       HeapWord* bottom,         \
 694                                                       HeapWord* top,            \
 695                                                       ClosureType* cl) {        \
 696   /* Skip parts that are before "mr", in case "block_start" sent us             \
 697      back too far. */                                                           \
 698   HeapWord* mr_start = mr.start();                                              \
 699   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 700   HeapWord* next = bottom + bot_size;                                           \
 701   while (next < mr_start) {                                                     \
 702     bottom = next;                                                              \
 703     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 704     next = bottom + bot_size;                                                   \
 705   }                                                                             \
 706                                                                                 \
 707   while (bottom < top) {                                                        \
 708     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 709         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 710                     oop(bottom)) &&                                             \
 711         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 712       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 713       bottom += _cfls->adjustObjectSize(word_sz);                               \
 714     } else {                                                                    \
 715       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 716     }                                                                           \
 717   }                                                                             \
 718 }                                                                               \
 719 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 720                                                         HeapWord* bottom,       \
 721                                                         HeapWord* top,          \
 722                                                         ClosureType* cl) {      \
 723   /* Skip parts that are before "mr", in case "block_start" sent us             \
 724      back too far. */                                                           \
 725   HeapWord* mr_start = mr.start();                                              \
 726   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 727   HeapWord* next = bottom + bot_size;                                           \
 728   while (next < mr_start) {                                                     \
 729     bottom = next;                                                              \
 730     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 731     next = bottom + bot_size;                                                   \
 732   }                                                                             \
 733                                                                                 \
 734   while (bottom < top) {                                                        \
 735     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 736         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 737                     oop(bottom)) &&                                             \
 738         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 739       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 740       bottom += _cfls->adjustObjectSize(word_sz);                               \
 741     } else {                                                                    \
 742       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 743     }                                                                           \
 744   }                                                                             \
 745 }
 746 
 747 // (There are only two of these, rather than N, because the split is due
 748 // only to the introduction of the FilteringClosure, a local part of the
 749 // impl of this abstraction.)
 750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 751 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 752 
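
For reference, the qualified-call de-virtualization described in the comment before the macro ("we know that our space is a CompactibleFreeListSpace") can be illustrated with a minimal, self-contained sketch; the types below are hypothetical stand-ins, not the HotSpot classes:

#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins for Space/CompactibleFreeListSpace, only meant to
// show why the macro spells out _cfls->CompactibleFreeListSpace::block_size().
struct Space {
  virtual std::size_t block_size(const void* p) const { return 1; }
  virtual ~Space() {}
};

struct FreeListSpace : Space {
  std::size_t block_size(const void* p) const { return 2; }
};

std::size_t walk(FreeListSpace* sp, const void* p) {
  // Normal call: virtual dispatch through the vtable, hard to inline.
  std::size_t a = sp->block_size(p);
  // Qualified call: statically bound to FreeListSpace::block_size, so the
  // compiler can inline it -- the same trick the DEFN macro above relies on.
  std::size_t b = sp->FreeListSpace::block_size(p);
  return a + b;
}

int main() {
  FreeListSpace s;
  std::printf("%lu\n", (unsigned long)walk(&s, NULL));   // prints 4
  return 0;
}
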
 753 DirtyCardToOopClosure*
 754 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 755                                       CardTableModRefBS::PrecisionStyle precision,
 756                                       HeapWord* boundary) {
 757   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 758 }
 759 
 760 
 761 // Note on locking for the space iteration functions:
 762 // since the collector's iteration activities are concurrent with
 763 // allocation activities by mutators, absent a suitable mutual exclusion
 764 // mechanism the iterators may go awry. For instance a block being iterated
 765 // may suddenly be allocated or divided up and part of it allocated and
 766 // so on.
 767 
 768 // Apply the given closure to each block in the space.
 769 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 770   assert_lock_strong(freelistLock());
 771   HeapWord *cur, *limit;


 773        cur += cl->do_blk_careful(cur));
 774 }
 775 
 776 // Apply the given closure to each block in the space.
 777 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 778   assert_lock_strong(freelistLock());
 779   HeapWord *cur, *limit;
 780   for (cur = bottom(), limit = end(); cur < limit;
 781        cur += cl->do_blk(cur));
 782 }
 783 
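
The loop idiom above, where the closure returns how many words it consumed and the loop advances by exactly that amount, may be easier to see in isolation. Here is a small sketch with made-up types, not the real BlkClosure/HeapWord machinery:

#include <cstddef>
#include <cstdio>

// Made-up stand-ins: the closure reports the size of the block it just
// visited and the caller steps forward by exactly that amount.
struct BlkClosure {
  virtual std::size_t do_blk(const unsigned long* addr) = 0;
  virtual ~BlkClosure() {}
};

struct CountingClosure : BlkClosure {
  std::size_t blocks;
  CountingClosure() : blocks(0) {}
  virtual std::size_t do_blk(const unsigned long* addr) {
    ++blocks;
    return 2;   // pretend every block is two words long
  }
};

int main() {
  unsigned long heap[10] = { 0 };
  CountingClosure cl;
  // Same shape as CompactibleFreeListSpace::blk_iterate: empty loop body,
  // all the work happens in do_blk and in the increment expression.
  for (unsigned long *cur = heap, *limit = heap + 10; cur < limit;
       cur += cl.do_blk(cur)) {
  }
  std::printf("visited %lu blocks\n", (unsigned long)cl.blocks);   // visited 5 blocks
  return 0;
}
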
 784 // Apply the given closure to each oop in the space.
 785 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 786   assert_lock_strong(freelistLock());
 787   HeapWord *cur, *limit;
 788   size_t curSize;
 789   for (cur = bottom(), limit = end(); cur < limit;
 790        cur += curSize) {
 791     curSize = block_size(cur);
 792     if (block_is_obj(cur)) {
 793       oop(cur)->oop_iterate(cl);
 794     }
 795   }
 796 }
 797 
 798 // NOTE: In the following methods, in order to safely be able to
 799 // apply the closure to an object, we need to be sure that the
 800 // object has been initialized. We are guaranteed that an object
 801 // is initialized if we are holding the Heap_lock with the
 802 // world stopped.
 803 void CompactibleFreeListSpace::verify_objects_initialized() const {
 804   if (is_init_completed()) {
 805     assert_locked_or_safepoint(Heap_lock);
 806     if (Universe::is_fully_initialized()) {
 807       guarantee(SafepointSynchronize::is_at_safepoint(),
 808                 "Required for objects to be initialized");
 809     }
 810   } // else make a concession at vm start-up
 811 }
 812 
 813 // Apply the given closure to each object in the space


1980 #ifdef ASSERT
1981   // Check the sanity of save_marks() etc.
1982   MemRegion ur    = used_region();
1983   MemRegion urasm = used_region_at_save_marks();
1984   assert(ur.contains(urasm),
1985          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1986                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1987                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1988 #endif
1989   // inform allocator that promotions should be tracked.
1990   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1991   _promoInfo.startTrackingPromotions();
1992 }
1993 
1994 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1995   assert(_promoInfo.tracking(), "No preceding save_marks?");
1996   assert(SharedHeap::heap()->n_par_threads() == 0,
1997          "Shouldn't be called if using parallel gc.");
1998   return _promoInfo.noPromotions();
1999 }
2000 
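
As a rough sketch of the save-marks / promotion-tracking handshake that the asserts above rely on (noPromotions() must hold before tracking starts, and tracking() must hold before it is queried), consider the following; PromoInfoSketch is invented for illustration and is not the HotSpot PromotionInfo class:

#include <cassert>
#include <vector>

// Invented for illustration only -- not the HotSpot PromotionInfo class.
struct PromoInfoSketch {
  bool _tracking;
  std::vector<void*> _promoted;
  PromoInfoSketch() : _tracking(false) {}
  bool tracking() const          { return _tracking; }
  bool noPromotions() const      { return _promoted.empty(); }
  void startTrackingPromotions() { _tracking = true; }
  void record(void* obj)         { if (_tracking) _promoted.push_back(obj); }
};

int main() {
  PromoInfoSketch info;
  assert(info.noPromotions());        // nothing recorded before save_marks()
  info.startTrackingPromotions();     // save_marks(): start tracking
  int obj;
  info.record(&obj);                  // a promotion after save_marks()
  assert(info.tracking() && !info.noPromotions());
  return 0;
}
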
2001 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
2002                                                                             \
2003 void CompactibleFreeListSpace::                                             \
2004 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
2005   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
2006          "Shouldn't be called (yet) during parallel part of gc.");          \
2007   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2008   /*                                                                        \
2009    * This also restores any displaced headers and removes the elements from \
2010    * the iteration set as they are processed, so that we have a clean slate \
2011    * at the end of the iteration. Note, thus, that if new objects are       \
2012    * promoted as a result of the iteration they are iterated over as well.  \
2013    */                                                                       \
2014   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2015 }
2016 
2017 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2018 
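
For readers unfamiliar with the ALL_SINCE_SAVE_MARKS_CLOSURES idiom, the instantiation pattern looks roughly like the reduced sketch below (closure names invented); one list macro applies a definition macro to every (closure type, suffix) pair:

#include <cstdio>

// Invented closure types, just to show the expansion pattern.
struct ClosureA { void apply() { std::puts("A"); } };
struct ClosureB { void apply() { std::puts("B"); } };

// One entry per (closure type, non-virtual suffix) pair, analogous to
// ALL_SINCE_SAVE_MARKS_CLOSURES.
#define ALL_CLOSURES(f)   \
  f(ClosureA, _a_nv)      \
  f(ClosureB, _b_nv)

// Stamps out one definition per entry, analogous to
// CFLS_OOP_SINCE_SAVE_MARKS_DEFN.
#define DEFINE_ITERATE(ClosureType, nv_suffix)                      \
  void oop_since_save_marks_iterate##nv_suffix(ClosureType* blk) {  \
    blk->apply();                                                    \
  }

ALL_CLOSURES(DEFINE_ITERATE)

int main() {
  ClosureA a; ClosureB b;
  oop_since_save_marks_iterate_a_nv(&a);   // prints A
  oop_since_save_marks_iterate_b_nv(&b);   // prints B
  return 0;
}
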
2019 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2020   return _smallLinearAllocBlock._word_size == 0;
2021 }
2022 
2023 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2024   // Fix up linear allocation blocks to look like free blocks
2025   repairLinearAllocBlock(&_smallLinearAllocBlock);
2026 }
2027 
2028 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2029   assert_locked();
2030   if (blk->_ptr != NULL) {
2031     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2032            "Minimum block size requirement");
2033     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2034     fc->set_size(blk->_word_size);
2035     fc->link_prev(NULL);   // mark as free
2036     fc->dontCoalesce();
2037     assert(fc->is_free(), "just marked it free");




  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/concurrentMarkSweep/promotionInfo.inline.hpp"
  31 #include "gc_implementation/shared/liveRange.hpp"
  32 #include "gc_implementation/shared/spaceDecorator.hpp"
  33 #include "gc_interface/collectedHeap.inline.hpp"
  34 #include "memory/allocation.inline.hpp"
  35 #include "memory/blockOffsetTable.inline.hpp"
  36 #include "memory/space.inline.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/space.inline.hpp"
  39 #include "memory/universe.inline.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/globals.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/init.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/orderAccess.inline.hpp"
  46 #include "runtime/vmThread.hpp"
  47 #include "utilities/copy.hpp"
  48 
  49 /////////////////////////////////////////////////////////////////////////
  50 //// CompactibleFreeListSpace
  51 /////////////////////////////////////////////////////////////////////////
  52 
  53 // highest ranked  free list lock rank
  54 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  55 
  56 // Defaults are 0 so things will break badly if incorrectly initialized.


 660                                        ClosureType* cl);                \
 661     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 662                                        HeapWord* bottom, HeapWord* top, \
 663                                        ClosureType* cl)
 664   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 665   walk_mem_region_with_cl_DECL(FilteringClosure);
 666 
 667 public:
 668   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 669                       CMSCollector* collector,
 670                       ExtendedOopClosure* cl,
 671                       CardTableModRefBS::PrecisionStyle precision,
 672                       HeapWord* boundary) :
 673     Filtering_DCTOC(sp, cl, precision, boundary),
 674     _cfls(sp), _collector(collector) {}
 675 };
 676 
 677 // We de-virtualize the block-related calls below, since we know that our
 678 // space is a CompactibleFreeListSpace.
 679 
 680 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType, nv)      \
 681 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 682                                                  HeapWord* bottom,              \
 683                                                  HeapWord* top,                 \
 684                                                  ClosureType* cl) {             \
 685    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 686    if (is_par) {                                                                \
 687      assert(SharedHeap::heap()->n_par_threads() ==                              \
 688             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 689      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 690    } else {                                                                     \
 691      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 692    }                                                                            \
 693 }                                                                               \
 694 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 695                                                       HeapWord* bottom,         \
 696                                                       HeapWord* top,            \
 697                                                       ClosureType* cl) {        \
 698   /* Skip parts that are before "mr", in case "block_start" sent us             \
 699      back too far. */                                                           \
 700   HeapWord* mr_start = mr.start();                                              \
 701   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 702   HeapWord* next = bottom + bot_size;                                           \
 703   while (next < mr_start) {                                                     \
 704     bottom = next;                                                              \
 705     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 706     next = bottom + bot_size;                                                   \
 707   }                                                                             \
 708                                                                                 \
 709   while (bottom < top) {                                                        \
 710     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 711         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 712                     oop(bottom)) &&                                             \
 713         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 714       size_t word_sz = oop(bottom)->oop_iterate<nv>(cl, mr);                    \
 715       bottom += _cfls->adjustObjectSize(word_sz);                               \
 716     } else {                                                                    \
 717       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 718     }                                                                           \
 719   }                                                                             \
 720 }                                                                               \
 721 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 722                                                         HeapWord* bottom,       \
 723                                                         HeapWord* top,          \
 724                                                         ClosureType* cl) {      \
 725   /* Skip parts that are before "mr", in case "block_start" sent us             \
 726      back too far. */                                                           \
 727   HeapWord* mr_start = mr.start();                                              \
 728   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 729   HeapWord* next = bottom + bot_size;                                           \
 730   while (next < mr_start) {                                                     \
 731     bottom = next;                                                              \
 732     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 733     next = bottom + bot_size;                                                   \
 734   }                                                                             \
 735                                                                                 \
 736   while (bottom < top) {                                                        \
 737     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 738         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 739                     oop(bottom)) &&                                             \
 740         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 741       size_t word_sz = oop(bottom)->oop_iterate<nv>(cl, mr);                    \
 742       bottom += _cfls->adjustObjectSize(word_sz);                               \
 743     } else {                                                                    \
 744       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 745     }                                                                           \
 746   }                                                                             \
 747 }
 748 
 749 // (There are only two of these, rather than N, because the split is due
 750 // only to the introduction of the FilteringClosure, a local part of the
 751 // impl of this abstraction.)
 752 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure, false)
 753 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure, true)
 754 
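
Since the point of this change is to pass the non-virtual flag down as a template argument (the DEFN macro now takes an nv parameter and forwards it as oop_iterate<nv>(cl, mr)), here is a minimal sketch of what such compile-time dispatch selection buys; the names are illustrative only, not the actual oopDesc/closure API:

#include <cstdio>

// Illustrative closure with both a virtual and a statically bound entry point.
struct Closure {
  virtual void do_oop(int* p) { std::puts("virtual do_oop"); }
  void do_oop_nv(int* p)      { std::puts("non-virtual do_oop_nv"); }
  virtual ~Closure() {}
};

// The template parameter picks the dispatch mode at compile time; the branch
// on a compile-time constant is folded away by the compiler.
template <bool nv>
void iterate_one(Closure* cl, int* p) {
  if (nv) {
    cl->do_oop_nv(p);   // statically bound, can be inlined
  } else {
    cl->do_oop(p);      // ordinary virtual dispatch
  }
}

int main() {
  Closure c;
  int x = 0;
  iterate_one<true>(&c, &x);    // prints "non-virtual do_oop_nv"
  iterate_one<false>(&c, &x);   // prints "virtual do_oop"
  return 0;
}
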
 755 DirtyCardToOopClosure*
 756 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 757                                       CardTableModRefBS::PrecisionStyle precision,
 758                                       HeapWord* boundary) {
 759   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 760 }
 761 
 762 
 763 // Note on locking for the space iteration functions:
 764 // since the collector's iteration activities are concurrent with
 765 // allocation activities by mutators, absent a suitable mutual exclusion
 766 // mechanism the iterators may go awry. For instance a block being iterated
 767 // may suddenly be allocated or divided up and part of it allocated and
 768 // so on.
 769 
 770 // Apply the given closure to each block in the space.
 771 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 772   assert_lock_strong(freelistLock());
 773   HeapWord *cur, *limit;


 775        cur += cl->do_blk_careful(cur));
 776 }
 777 
 778 // Apply the given closure to each block in the space.
 779 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 780   assert_lock_strong(freelistLock());
 781   HeapWord *cur, *limit;
 782   for (cur = bottom(), limit = end(); cur < limit;
 783        cur += cl->do_blk(cur));
 784 }
 785 
 786 // Apply the given closure to each oop in the space.
 787 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 788   assert_lock_strong(freelistLock());
 789   HeapWord *cur, *limit;
 790   size_t curSize;
 791   for (cur = bottom(), limit = end(); cur < limit;
 792        cur += curSize) {
 793     curSize = block_size(cur);
 794     if (block_is_obj(cur)) {
 795       oop(cur)->oop_iterate<false>(cl);
 796     }
 797   }
 798 }
 799 
 800 // NOTE: In the following methods, in order to safely be able to
 801 // apply the closure to an object, we need to be sure that the
 802 // object has been initialized. We are guaranteed that an object
 803 // is initialized if we are holding the Heap_lock with the
 804 // world stopped.
 805 void CompactibleFreeListSpace::verify_objects_initialized() const {
 806   if (is_init_completed()) {
 807     assert_locked_or_safepoint(Heap_lock);
 808     if (Universe::is_fully_initialized()) {
 809       guarantee(SafepointSynchronize::is_at_safepoint(),
 810                 "Required for objects to be initialized");
 811     }
 812   } // else make a concession at vm start-up
 813 }
 814 
 815 // Apply the given closure to each object in the space


1982 #ifdef ASSERT
1983   // Check the sanity of save_marks() etc.
1984   MemRegion ur    = used_region();
1985   MemRegion urasm = used_region_at_save_marks();
1986   assert(ur.contains(urasm),
1987          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1988                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1989                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1990 #endif
1991   // inform allocator that promotions should be tracked.
1992   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1993   _promoInfo.startTrackingPromotions();
1994 }
1995 
1996 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1997   assert(_promoInfo.tracking(), "No preceding save_marks?");
1998   assert(SharedHeap::heap()->n_par_threads() == 0,
1999          "Shouldn't be called if using parallel gc.");
2000   return _promoInfo.noPromotions();
2001 }
2002 
2003 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2004   return _smallLinearAllocBlock._word_size == 0;
2005 }
2006 
2007 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2008   // Fix up linear allocation blocks to look like free blocks
2009   repairLinearAllocBlock(&_smallLinearAllocBlock);
2010 }
2011 
2012 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2013   assert_locked();
2014   if (blk->_ptr != NULL) {
2015     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2016            "Minimum block size requirement");
2017     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2018     fc->set_size(blk->_word_size);
2019     fc->link_prev(NULL);   // mark as free
2020     fc->dontCoalesce();
2021     assert(fc->is_free(), "just marked it free");